Arm NN 24.08 - Pooling3dLayer.cpp (source listing excerpt)
std::vector<TensorShape> Pooling3dLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
{
    if (inputShapes.size() != 1)
    {
        throw armnn::Exception(
            "inputShapes' size is \"" + std::to_string(inputShapes.size()) + "\" - should be \"1\".");
    }
    const TensorShape& inputShape = inputShapes[0];
    armnnUtils::DataLayoutIndexed dimensionIndices = m_Param.m_DataLayout;

    unsigned int inWidth = inputShape[dimensionIndices.GetWidthIndex()];
    unsigned int inHeight = inputShape[dimensionIndices.GetHeightIndex()];
    unsigned int inDepth = inputShape[dimensionIndices.GetDepthIndex()];
    unsigned int inChannels = inputShape[dimensionIndices.GetChannelsIndex()];
    unsigned int inBatchSize = inputShape[0];
    bool isGlobalPooling = (m_Param.m_StrideX == 0 && m_Param.m_StrideY == 0 && m_Param.m_StrideZ == 0);
    unsigned int outWidth = 1;
    unsigned int outHeight = 1;
    unsigned int outDepth = 1;

    if (!isGlobalPooling)
    {
        if (m_Param.m_StrideX == 0 || m_Param.m_StrideY == 0 || m_Param.m_StrideZ == 0)
        {
            throw armnn::Exception("Stride can only be zero when performing global pooling");
        }

        auto CalcSize = [](auto inSize, auto lowPad, auto highPad, auto poolSize, auto stride,
                           auto outputShapeRounding)
        {
            unsigned int readSize = inSize + lowPad + highPad - poolSize;
            float div = static_cast<float>(readSize) / static_cast<float>(stride);

            unsigned int size = 0;
            switch (outputShapeRounding)
            {
                case OutputShapeRounding::Ceiling:
                    size = static_cast<unsigned int>(ceil(div)) + 1;
                    break;
                case OutputShapeRounding::Floor:
                    size = static_cast<unsigned int>(floor(div)) + 1;
                    break;
            }

            // Make sure that border operations will start from inside the input and not the padded area.
            if ((size - 1) * stride >= inSize + lowPad)
            {
                --size;
            }

            return size;
        };

        outWidth = CalcSize(inWidth, m_Param.m_PadLeft, m_Param.m_PadRight, m_Param.m_PoolWidth,
                            m_Param.m_StrideX, m_Param.m_OutputShapeRounding);
        outHeight = CalcSize(inHeight, m_Param.m_PadTop, m_Param.m_PadBottom, m_Param.m_PoolHeight,
                             m_Param.m_StrideY, m_Param.m_OutputShapeRounding);
        outDepth = CalcSize(inDepth, m_Param.m_PadFront, m_Param.m_PadBack, m_Param.m_PoolDepth,
                            m_Param.m_StrideZ, m_Param.m_OutputShapeRounding);
    }
    unsigned int outChannels = inChannels;
    unsigned int outBatchSize = inBatchSize;

    TensorShape tensorShape = m_Param.m_DataLayout == DataLayout::NDHWC ?
        TensorShape( { outBatchSize, outDepth, outHeight, outWidth, outChannels } ) :
        TensorShape( { outBatchSize, outChannels, outDepth, outHeight, outWidth });

    return std::vector<TensorShape>({ tensorShape });
}
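For intuition about the CalcSize arithmetic above, the following self-contained sketch (not part of the ArmNN sources; the function name CalcSizeExample and the numeric values are illustrative) restates the same per-dimension formula. With inSize = 10, no padding, poolSize = 3 and stride = 2, readSize = 10 - 3 = 7 and div = 3.5, so Floor rounding gives 4 output elements while Ceiling gives 5; the final check only shrinks the result if the last pooling window would start entirely in the padded area.

#include <cmath>
#include <iostream>

// Illustrative re-statement of the per-dimension output size computation (not ArmNN code).
unsigned int CalcSizeExample(unsigned int inSize, unsigned int lowPad, unsigned int highPad,
                             unsigned int poolSize, unsigned int stride, bool ceilRounding)
{
    unsigned int readSize = inSize + lowPad + highPad - poolSize;
    float div = static_cast<float>(readSize) / static_cast<float>(stride);
    unsigned int size = static_cast<unsigned int>(ceilRounding ? std::ceil(div) : std::floor(div)) + 1;

    // Border correction: the last pooling window must start inside the (left-padded) input.
    if ((size - 1) * stride >= inSize + lowPad)
    {
        --size;
    }
    return size;
}

int main()
{
    std::cout << CalcSizeExample(10, 0, 0, 3, 2, false) << std::endl; // Floor   -> 4
    std::cout << CalcSizeExample(10, 0, 0, 3, 2, true)  << std::endl; // Ceiling -> 5
    return 0;
}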
    // In Pooling3dLayer::ValidateTensorShapesFromInputs():
    if (inferredShapes.size() != 1)
    {
        throw armnn::LayerValidationException("inferredShapes has " + std::to_string(inferredShapes.size()) +
                                              " elements - should only have 1.");
    }
OutputShapeRounding m_OutputShapeRounding
The rounding method for the output shape. (Floor, Ceiling).
uint32_t m_PadTop
Padding top value in the height dimension.
const TensorInfo & GetTensorInfo() const override
Pooling3dDescriptor
A Pooling3dDescriptor for the Pooling3dLayer.
Pooling3dLayer
This layer represents a pooling 3d operation. A hedged usage sketch of configuring this layer through the public API follows at the end of this reference list.
Pooling3dLayer * Clone(Graph &graph) const override
Creates a dynamically-allocated copy of this layer.
DataLayoutIndexed
Provides access to the appropriate indexes for Channels, Height and Width based on DataLayout.
void ValidateAndCopyShape(const TensorShape &outputShape, const TensorShape &inferredShape, const ShapeInferenceMethod shapeInferenceMethod, const std::string &layerName, const unsigned int outputSlotIndex=0)
Pooling3dLayer(const Pooling3dDescriptor &param, const char *name)
Constructor to create a Pooling3dLayer.
const OutputSlot & GetOutputSlot(unsigned int index=0) const override
Get the const output slot handle by slot index.
uint32_t m_StrideZ
Stride value when proceeding through input for the depth dimension.
DataLayout m_DataLayout
The data layout to be used (NCDHW, NDHWC).
uint32_t m_PadBottom
Padding bottom value in the height dimension.
const InputSlot & GetInputSlot(unsigned int index) const override
Get a const input slot handle by slot index.
const Pooling3dDescriptor & GetParameters() const override
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
void Pooling3d(Decoder< float > &rInputDecoder, Encoder< float > &rOutputEncoder, const TensorInfo &inputInfo, const TensorInfo &outputInfo, const Pooling3dDescriptor &params)
Computes the Pooling3d operation.
const char * GetName() const override
Returns the name of the layer.
unsigned int GetHeightIndex() const
void ExecuteStrategy(IStrategy &strategy) const override
Apply a visitor to this layer.
virtual std::unique_ptr< IWorkload > CreateWorkload(const IWorkloadFactory &factory) const override
Makes a workload for the Pooling3d type.
uint32_t m_PoolWidth
Pooling width value.
Pooling3dDescriptor m_Param
The parameters for the layer (not including tensor-valued weights etc.).
unsigned int GetNumDimensions() const
Function that returns the tensor rank.
WorkloadInfo PrepInfoAndDesc(QueueDescriptor &descriptor) const
Helper function to reduce duplication in *Layer::CreateWorkload.
void VerifyShapeInferenceType(const TensorShape &outputShape, ShapeInferenceMethod shapeInferenceMethod)
void SetAdditionalInfo(QueueDescriptor &descriptor) const
armnn::Exception
Base class for all ArmNN exceptions so that users can filter to just those.
unsigned int GetWidthIndex() const
uint32_t m_PadFront
Padding front value in the depth dimension.
uint32_t m_PadRight
Padding right value in the width dimension.
void ValidateTensorShapesFromInputs() override
Check if the input tensor shape(s) will lead to a valid configuration of Pooling3dLayer.
uint32_t m_PoolHeight
Pooling height value.
uint32_t m_PadBack
Padding back value in the depth dimension.
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
uint32_t m_PadLeft
Padding left value in the width dimension.
const TensorShape & GetShape() const
std::vector< TensorShape > InferOutputShapes(const std::vector< TensorShape > &inputShapes) const override
By default returns inputShapes if the number of inputs is equal to the number of outputs,...
unsigned int GetChannelsIndex() const
void VerifyLayerConnections(unsigned int expectedConnections, const CheckLocation &location) const
uint32_t m_PoolDepth
Pooling depth value.
ShapeInferenceMethod m_ShapeInferenceMethod
LayerType
When adding a new layer, adapt also the LastLayer enum value in the enum class LayerType below.
virtual std::unique_ptr< IWorkload > CreateWorkload(LayerType type, const QueueDescriptor &descriptor, const WorkloadInfo &info) const =0
Backends should implement their own CreateWorkload function with a switch statement.
virtual void ExecuteStrategy(const IConnectableLayer *layer, const armnn::BaseDescriptor &descriptor, const std::vector< armnn::ConstTensor > &constants, const char *name, const armnn::LayerBindingId id=0)=0
unsigned int GetDepthIndex() const
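To tie the descriptor fields and layer accessors above together, here is a minimal, hedged sketch of how a pooling 3d layer is typically configured through the public API. It assumes the usual Arm NN entry points (armnn::INetwork::Create, AddInputLayer, AddPooling3dLayer, AddOutputLayer) and a descriptor field m_PoolType that is not shown in the listing above; the layer name "pool3d" and the 1x4x4x4x3 input shape are illustrative only.

#include <armnn/ArmNN.hpp>

int main()
{
    using namespace armnn;

    // Describe a 2x2x2 max pooling with stride 2 and no padding, NDHWC data layout.
    Pooling3dDescriptor desc;
    desc.m_PoolType   = PoolingAlgorithm::Max;   // assumed member, mirroring the 2d pooling descriptor
    desc.m_PoolWidth  = 2;
    desc.m_PoolHeight = 2;
    desc.m_PoolDepth  = 2;
    desc.m_StrideX    = 2;
    desc.m_StrideY    = 2;
    desc.m_StrideZ    = 2;
    desc.m_OutputShapeRounding = OutputShapeRounding::Floor;
    desc.m_DataLayout = DataLayout::NDHWC;

    INetworkPtr network = INetwork::Create();
    IConnectableLayer* input  = network->AddInputLayer(0);
    IConnectableLayer* pool   = network->AddPooling3dLayer(desc, "pool3d");
    IConnectableLayer* output = network->AddOutputLayer(0);

    // A 1x4x4x4x3 (NDHWC) float input; for this descriptor InferOutputShapes would produce 1x2x2x2x3.
    TensorInfo inputInfo(TensorShape({1, 4, 4, 4, 3}), DataType::Float32);
    input->GetOutputSlot(0).SetTensorInfo(inputInfo);

    input->GetOutputSlot(0).Connect(pool->GetInputSlot(0));
    pool->GetOutputSlot(0).Connect(output->GetInputSlot(0));
    return 0;
}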