31 const std::vector<TensorShape>& inputShapes =
37 unsigned int inputChannels = filterShape[1];
38 unsigned int filterWidth = filterShape[3];
39 unsigned int filterHeight = filterShape[2];
40 unsigned int depthMultiplier = filterShape[0];
42 fn(
"FilterWidth",std::to_string(filterWidth));
43 fn(
"FilterHeight",std::to_string(filterHeight));
44 fn(
"DepthMultiplier",std::to_string(depthMultiplier));
45 fn(
"InputChannels",std::to_string(inputChannels));
61 return std::move(layer);
64std::vector<TensorShape>
67 if (inputShapes.size() != 2)
69 throw armnn::Exception(
"inputShapes' size is \"" + std::to_string(inputShapes.size()) +
70 "\" - should be \"2\".");
94 unsigned int inputBatchSize = inputShape[0];
95 unsigned int inputHeight = inputShape[dataLayoutIndex.
GetHeightIndex()];
96 unsigned int inputWidth = inputShape[dataLayoutIndex.
GetWidthIndex()];
101 unsigned int filterHeight = filterShape[1];
102 unsigned int dilatedFilterHeight = filterHeight + (
m_Param.m_DilationY - 1) * (filterHeight - 1);
103 unsigned int readHeight = (inputHeight +
m_Param.m_PadTop +
m_Param.m_PadBottom) - dilatedFilterHeight;
104 unsigned int outputHeight = 1 + (readHeight /
m_Param.m_StrideY);
106 unsigned int filterWidth = filterShape[2];
107 unsigned int dilatedFilterWidth = filterWidth + (
m_Param.m_DilationX - 1) * (filterWidth - 1);
108 unsigned int readWidth = (inputWidth +
m_Param.m_PadLeft +
m_Param.m_PadRight) - dilatedFilterWidth;
109 unsigned int outputWidth = 1 + (readWidth /
m_Param.m_StrideX);
111 unsigned int outputChannels = filterShape[3];
112 unsigned int outputBatchSize = inputBatchSize;
115 TensorShape{ outputBatchSize, outputHeight, outputWidth, outputChannels } :
116 TensorShape{ outputBatchSize, outputChannels, outputHeight, outputWidth };
118 return std::vector<TensorShape>{ tensorShape };
139 if (inferredShapes.size() != 1)
142 + std::to_string(inferredShapes.size()) +
143 " elements - should only have 1.");
ImmutableConstantTensors GetConstantTensorsByRef() const override
Retrieve the handles to the constant values connected to the layer.
void SerializeLayerParameters(ParameterStringifyFunction &fn) const override
Helper to serialize the layer parameters to string.
void ExecuteStrategy(IStrategy &strategy) const override
Apply a visitor to this layer.
std::vector< TensorShape > InferOutputShapes(const std::vector< TensorShape > &inputShapes) const override
By default returns inputShapes if the number of inputs are equal to number of outputs,...
DepthwiseConvolution2dLayer * Clone(Graph &graph) const override
Creates a dynamically-allocated copy of this layer.
void ValidateTensorShapesFromInputs() override
Check if the input tensor shape(s) will lead to a valid configuration of DepthwiseConvolution2dLayer.
virtual std::unique_ptr< IWorkload > CreateWorkload(const IWorkloadFactory &factory) const override
Makes a workload for the DepthwiseConvolution2d type.
DepthwiseConvolution2dLayer(const DepthwiseConvolution2dDescriptor &param, const char *name)
Constructor to create a DepthwiseConvolution2dLayer.
Base class for all ArmNN exceptions so that users can filter to just those.
std::vector< std::reference_wrapper< const std::shared_ptr< ConstTensorHandle > > > ImmutableConstantTensors
virtual void ExecuteStrategy(const IConnectableLayer *layer, const armnn::BaseDescriptor &descriptor, const std::vector< armnn::ConstTensor > &constants, const char *name, const armnn::LayerBindingId id=0)=0
virtual std::unique_ptr< IWorkload > CreateWorkload(LayerType type, const QueueDescriptor &descriptor, const WorkloadInfo &info) const =0
Backends should implement their own CreateWorkload function with a switch statement.
void VerifyLayerConnections(unsigned int expectedConnections, const CheckLocation &location) const
const InputSlot & GetInputSlot(unsigned int index) const override
Get a const input slot handle by slot index.
void VerifyShapeInferenceType(const TensorShape &outputShape, ShapeInferenceMethod shapeInferenceMethod)
const OutputSlot & GetOutputSlot(unsigned int index=0) const override
Get the const output slot handle by slot index.
LayerType * CloneBase(Graph &graph, Params &&... params) const
const char * GetName() const override
Returns the name of the layer.
void ValidateAndCopyShape(const TensorShape &outputShape, const TensorShape &inferredShape, const ShapeInferenceMethod shapeInferenceMethod, const std::string &layerName, const unsigned int outputSlotIndex=0)
void SetAdditionalInfo(QueueDescriptor &descriptor) const
ShapeInferenceMethod m_ShapeInferenceMethod
LayerWithParameters(unsigned int numInputSlots, unsigned int numOutputSlots, LayerType type, const DepthwiseConvolution2dDescriptor &param, const char *name)
void SerializeLayerParameters(ParameterStringifyFunction &fn) const override
Helper to serialize the layer parameters to string (currently used in DotSerializer and company).
WorkloadInfo PrepInfoAndDesc(QueueDescriptor &descriptor) const
const DepthwiseConvolution2dDescriptor & GetParameters() const override
DepthwiseConvolution2dDescriptor m_Param
Layer::ImmutableConstantTensors GetConnectedConstantAsInputTensors() const
const TensorInfo & GetTensorInfo() const override
const TensorShape & GetShape() const
unsigned int GetNumDimensions() const
Function that returns the tensor rank.
Provides access to the appropriate indexes for Channels, Height and Width based on DataLayout.
unsigned int GetWidthIndex() const
unsigned int GetHeightIndex() const
Copyright (c) 2021 ARM Limited and Contributors.
uint32_t GetNumInputs(bool biasEnabled)
LayerType
When adding a new layer, adapt also the LastLayer enum value in the enum class LayerType below.
std::function< void(const std::string &name, const std::string &value)> ParameterStringifyFunction
A DepthwiseConvolution2dDescriptor for the DepthwiseConvolution2dLayer.
Depthwise Convolution 2D layer workload data.