ArmNN
 25.11
Loading...
Searching...
No Matches
Pooling3dLayer.cpp
Go to the documentation of this file.
1//
2// Copyright © 2021-2024 Arm Ltd and Contributors. All rights reserved.
3// SPDX-License-Identifier: MIT
4//
5
#include "Pooling3dLayer.hpp"

#include "LayerCloneBase.hpp"

#include <armnn/TypesUtils.hpp>

#include <armnnUtils/DataLayoutIndexed.hpp>

#include <armnn/backends/WorkloadFactory.hpp>

#include <cmath>

17using namespace armnnUtils;
18
19namespace armnn
20{
21
23 : LayerWithParameters(1, 1, LayerType::Pooling3d, param, name)
24{
25}
26
27std::unique_ptr<IWorkload> Pooling3dLayer::CreateWorkload(const IWorkloadFactory& factory) const
28{
29 Pooling3dQueueDescriptor descriptor;
30 SetAdditionalInfo(descriptor);
31
32 return factory.CreateWorkload(LayerType::Pooling3d, descriptor, PrepInfoAndDesc(descriptor));
33}
34
39
40std::vector<TensorShape> Pooling3dLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
41{
42 if (inputShapes.size() != 1)
43 {
44 throw armnn::Exception("inputShapes' size is \"" + std::to_string(inputShapes.size()) +
45 "\" - should be \"1\".");
46 }
47
48 const TensorShape& inputShape = inputShapes[0];
49 const DataLayoutIndexed dimensionIndices = m_Param.m_DataLayout;
50
51 // If we support multiple batch dimensions in the future, then this assert will need to change.
52 if (inputShape.GetNumDimensions() != 5)
53 {
54 throw armnn::Exception("Pooling3dLayer will always have 5D input.");
55 }
56
57 unsigned int inWidth = inputShape[dimensionIndices.GetWidthIndex()];
58 unsigned int inHeight = inputShape[dimensionIndices.GetHeightIndex()];
59 unsigned int inDepth = inputShape[dimensionIndices.GetDepthIndex()];
60 unsigned int inChannels = inputShape[dimensionIndices.GetChannelsIndex()];
61 unsigned int inBatchSize = inputShape[0];
62
63 bool isGlobalPooling = (m_Param.m_StrideX==0 && m_Param.m_StrideY==0 && m_Param.m_StrideZ==0);
64 unsigned int outWidth = 1;
65 unsigned int outHeight = 1;
66 unsigned int outDepth = 1;
67 if (!isGlobalPooling)
68 {
69 if (!m_Param.m_StrideX || !m_Param.m_StrideY || !m_Param.m_StrideZ)
70 {
71 throw armnn::Exception("Stride can only be zero when performing global pooling");
72 }
73
74 auto CalcSize = [](auto inSize, auto lowPad, auto highPad, auto poolSize, auto stride, auto outputShapeRounding)
75 {
76 unsigned int readSize = inSize + lowPad + highPad - poolSize;
77 float div = static_cast<float>(readSize) / static_cast<float>(stride);
78
79 unsigned int size = 0;
80 switch (outputShapeRounding)
81 {
83 size = static_cast<unsigned int>(ceil(div)) + 1;
84 break;
85 case OutputShapeRounding ::Floor:
86 size = static_cast<unsigned int>(floor(div)) + 1;
87 break;
88 default:
89 throw armnn::Exception("Unsupported Output Shape Rounding");
90 }
91
92 // Makes sure that border operations will start from inside the input and not the padded area.
93 // This is what CL does...
94 if ((size - 1)*stride >= inSize + lowPad)
95 {
96 --size;
97 }
98
99 return size;
100 };
101
102 outWidth = CalcSize(inWidth, m_Param.m_PadLeft, m_Param.m_PadRight, m_Param.m_PoolWidth, m_Param.m_StrideX,
103 m_Param.m_OutputShapeRounding);
104 outHeight = CalcSize(inHeight, m_Param.m_PadTop, m_Param.m_PadBottom, m_Param.m_PoolHeight, m_Param.m_StrideY,
105 m_Param.m_OutputShapeRounding);
106 outDepth = CalcSize(inDepth, m_Param.m_PadFront, m_Param.m_PadBack, m_Param.m_PoolDepth, m_Param.m_StrideZ,
107 m_Param.m_OutputShapeRounding);
108 }
109 unsigned int outChannels = inChannels;
110 unsigned int outBatchSize = inBatchSize;
111
112 TensorShape tensorShape = m_Param.m_DataLayout == armnn::DataLayout::NDHWC ?
113 TensorShape( { outBatchSize, outDepth, outHeight, outWidth, outChannels } ) :
114 TensorShape( { outBatchSize, outChannels, outDepth, outHeight, outWidth });
115
116 return std::vector<TensorShape>({ tensorShape });
117}
118
120{
122
123 const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
124
126
127 auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetTensorInfo().GetShape() });
128
129 if (inferredShapes.size() != 1)
130 {
131 throw armnn::LayerValidationException("inferredShapes has "
132 + std::to_string(inferredShapes.size()) +
133 " elements - should only have 1.");
134 }
135
136 ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "Pooling3dLayer");
137}
138
140{
141 strategy.ExecuteStrategy(this, GetParameters(), {}, GetName());
142}
143
144} // namespace armnn
#define CHECK_LOCATION()
Base class for all ArmNN exceptions so that users can filter to just those.
virtual void ExecuteStrategy(const IConnectableLayer *layer, const armnn::BaseDescriptor &descriptor, const std::vector< armnn::ConstTensor > &constants, const char *name, const armnn::LayerBindingId id=0)=0
virtual std::unique_ptr< IWorkload > CreateWorkload(LayerType type, const QueueDescriptor &descriptor, const WorkloadInfo &info) const =0
Backends should implement their own CreateWorkload function with a switch statement.
const TensorInfo & GetTensorInfo() const override
Gets the TensorInfo for this InputSlot.
Definition Layer.cpp:614
void VerifyLayerConnections(unsigned int expectedConnections, const CheckLocation &location) const
Definition Layer.cpp:410
const InputSlot & GetInputSlot(unsigned int index) const override
Get a const input slot handle by slot index.
Definition Layer.hpp:337
void VerifyShapeInferenceType(const TensorShape &outputShape, ShapeInferenceMethod shapeInferenceMethod)
Definition Layer.cpp:526
const OutputSlot & GetOutputSlot(unsigned int index=0) const override
Get the const output slot handle by slot index.
Definition Layer.hpp:339
LayerType * CloneBase(Graph &graph, Params &&... params) const
const char * GetName() const override
Returns the name of the layer.
Definition Layer.hpp:332
void ValidateAndCopyShape(const TensorShape &outputShape, const TensorShape &inferredShape, const ShapeInferenceMethod shapeInferenceMethod, const std::string &layerName, const unsigned int outputSlotIndex=0)
Definition Layer.cpp:457
void SetAdditionalInfo(QueueDescriptor &descriptor) const
Definition Layer.cpp:303
ShapeInferenceMethod m_ShapeInferenceMethod
Definition Layer.hpp:441
LayerWithParameters(unsigned int numInputSlots, unsigned int numOutputSlots, LayerType type, const Pooling3dDescriptor &param, const char *name)
WorkloadInfo PrepInfoAndDesc(QueueDescriptor &descriptor) const
const Pooling3dDescriptor & GetParameters() const override
const TensorInfo & GetTensorInfo() const override
Definition Layer.cpp:100
void ExecuteStrategy(IStrategy &strategy) const override
Apply a visitor to this layer.
std::vector< TensorShape > InferOutputShapes(const std::vector< TensorShape > &inputShapes) const override
By default returns inputShapes if the number of inputs are equal to number of outputs,...
void ValidateTensorShapesFromInputs() override
Check if the input tensor shape(s) will lead to a valid configuration of Pooling3dLayer.
virtual std::unique_ptr< IWorkload > CreateWorkload(const IWorkloadFactory &factory) const override
Makes a workload for the Pooling3d type.
Pooling3dLayer * Clone(Graph &graph) const override
Creates a dynamically-allocated copy of this layer.
Pooling3dLayer(const Pooling3dDescriptor &param, const char *name)
Constructor to create a Pooling3dLayer.
const TensorShape & GetShape() const
Definition Tensor.hpp:193
unsigned int GetNumDimensions() const
Function that returns the tensor rank.
Definition Tensor.cpp:174
Provides access to the appropriate indexes for Channels, Height and Width based on DataLayout.
unsigned int GetHeightIndex() const
unsigned int GetChannelsIndex() const
Copyright (c) 2021 ARM Limited and Contributors.
LayerType
When adding a new layer, adapt also the LastLayer enum value in the enum class LayerType below.
Definition Types.hpp:494
void Pooling3d(Decoder< float > &rInputDecoder, Encoder< float > &rOutputEncoder, const TensorInfo &inputInfo, const TensorInfo &outputInfo, const Pooling3dDescriptor &params)
Computes the Pooling3d operation.
A Pooling3dDescriptor for the Pooling3dLayer.