ArmNN
 25.02
All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Pages
TransposeConvolution2dLayer.cpp
Go to the documentation of this file.
1 //
2 // Copyright © 2019-2024 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
7 #include "LayerCloneBase.hpp"
8 
10 
13 
14 using namespace armnnUtils;
15 
16 namespace armnn
17 {
18 
20  const char* name)
22 {
23 }
24 
25 std::unique_ptr<IWorkload> TransposeConvolution2dLayer::CreateWorkload(const IWorkloadFactory& factory) const
26 {
27  if (!m_Weight)
28  {
29  throw armnn::NullPointerException("TransposeConvolution2dLayer: Weights data should not be null.");
30  }
31 
33  descriptor.m_Weight = m_Weight.get();
34 
36  {
37  if (!m_Bias)
38  {
39  throw armnn::NullPointerException("TransposeConvolution2dLayer: Bias data should not be null.");
40  }
41  descriptor.m_Bias = m_Bias.get();
42  }
43 
44  SetAdditionalInfo(descriptor);
45 
46  return factory.CreateWorkload(LayerType::TransposeConvolution2d, descriptor, PrepInfoAndDesc(descriptor));
47 }
48 
50 {
51  auto layer = CloneBase<TransposeConvolution2dLayer>(graph, m_Param, GetName());
52 
53  layer->m_Weight = m_Weight ? m_Weight : nullptr;
54 
55  if (layer->m_Param.m_BiasEnabled)
56  {
57  layer->m_Bias = m_Bias ? m_Bias : nullptr;
58  }
59 
60  return std::move(layer);
61 }
62 
64  const std::vector<TensorShape>& inputShapes) const
65 {
66  if (inputShapes.size() != 2)
67  {
68  throw armnn::Exception("inputShapes' size is \"" + std::to_string(inputShapes.size()) +
69  "\" - should be \"2\".");
70  }
71 
72  const TensorShape& inputShape = inputShapes[0];
73  const TensorShape& kernelShape = inputShapes[1];
74 
75  if (inputShape.GetNumDimensions() != 4)
76  {
77  throw armnn::Exception("Transpose convolutions will always have 4D input");
78  }
79 
80  DataLayoutIndexed dataLayoutIndex(m_Param.m_DataLayout);
81 
82  const unsigned int batches = inputShape[0];
83 
84  const unsigned int wInput = inputShape[dataLayoutIndex.GetWidthIndex()];
85  const unsigned int hInput = inputShape[dataLayoutIndex.GetHeightIndex()];
86 
87  const unsigned int wKernel = kernelShape[dataLayoutIndex.GetWidthIndex()];
88  const unsigned int hKernel = kernelShape[dataLayoutIndex.GetHeightIndex()];
89 
90  unsigned int wPadding = m_Param.m_PadLeft + m_Param.m_PadRight;
91  unsigned int hPadding = m_Param.m_PadTop + m_Param.m_PadBottom;
92 
93  unsigned int wOutput = (wInput - 1) * m_Param.m_StrideX + wKernel - wPadding;
94  unsigned int hOutput = (hInput - 1) * m_Param.m_StrideY + hKernel - hPadding;
95  unsigned int cOutput = kernelShape[0];
96 
98  TensorShape( { batches, hOutput, wOutput, cOutput } ) :
99  TensorShape( { batches, cOutput, hOutput, wOutput });
100 
101  return std::vector<TensorShape>({ tensorShape });
102 }
103 
105 {
107 
108  const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
109 
111 
112  if (!m_Weight)
113  {
114  throw armnn::LayerValidationException("TransposeConvolution2dLayer: Weight data cannot be null.");
115  }
116 
117  std::vector<TensorShape> expectedOutputShape;
118  std::vector<TensorShape> outputShapeGivenAsInput;
119 
120  expectedOutputShape = InferOutputShapes({GetInputSlot(0).GetTensorInfo().GetShape(),
121  m_Weight->GetTensorInfo().GetShape() });
122 
123  if (expectedOutputShape.size() != 1)
124  {
125  throw armnn::LayerValidationException("expectedOutputShape' size is "
126  + std::to_string(expectedOutputShape.size()) +
127  " - should be \"1\".");
128  }
129 
130  // If output_shape was specified then use it rather than calculate an inferred output shape.
132  {
133  TensorShape shapeAsTensorShape(static_cast<unsigned int>(m_Param.m_OutputShape.size()),
134  m_Param.m_OutputShape.data());
135  outputShapeGivenAsInput.push_back(shapeAsTensorShape);
136 
137  if (outputShapeGivenAsInput.size() != 1)
138  {
139  throw armnn::LayerValidationException("outputShapeGivenAsInput' size is "
140  + std::to_string(outputShapeGivenAsInput.size()) +
141  " - should be \"1\".");
142  }
143 
144  if (expectedOutputShape != outputShapeGivenAsInput)
145  {
146  throw armnn::LayerValidationException("TransposeConvolution2dLayer: "
147  "output calculated by InferOutputShapes and the output given "
148  "as an input parameter to the layer are not matching");
149  }
150  }
151 
152  ValidateAndCopyShape(outputShape, expectedOutputShape[0], m_ShapeInferenceMethod, "TransposeConvolution2dLayer");
153 }
154 
156 {
157  // For API stability DO NOT ALTER order and add new members to the end of vector
158  return {m_Weight, m_Bias};
159 }
160 
162 {
163  ManagedConstTensorHandle managedWeight(m_Weight);
164  std::vector<armnn::ConstTensor> constTensors { { managedWeight.GetTensorInfo(), managedWeight.Map() } };
165 
166  ManagedConstTensorHandle managedBias(m_Bias);
167  if (GetParameters().m_BiasEnabled)
168  {
169  constTensors.emplace_back(ConstTensor(managedBias.GetTensorInfo(), managedBias.Map()));
170  }
171 
172  strategy.ExecuteStrategy(this, GetParameters(), constTensors, GetName());
173 }
174 
175 } // namespace armnn
#define CHECK_LOCATION()
Definition: Exceptions.hpp:203
A tensor defined by a TensorInfo (shape and data type) and an immutable backing store.
Definition: Tensor.hpp:330
Base class for all ArmNN exceptions so that users can filter to just those.
Definition: Exceptions.hpp:47
std::vector< std::reference_wrapper< const std::shared_ptr< ConstTensorHandle > >> ImmutableConstantTensors
Definition: INetwork.hpp:141
virtual void ExecuteStrategy(const IConnectableLayer *layer, const armnn::BaseDescriptor &descriptor, const std::vector< armnn::ConstTensor > &constants, const char *name, const armnn::LayerBindingId id=0)=0
virtual std::unique_ptr< IWorkload > CreateWorkload(LayerType type, const QueueDescriptor &descriptor, const WorkloadInfo &info) const =0
Backends should implement their own CreateWorkload function with a switch statement.
const TensorInfo & GetTensorInfo() const override
Gets the TensorInfo for this InputSlot.
Definition: Layer.cpp:614
void VerifyLayerConnections(unsigned int expectedConnections, const CheckLocation &location) const
Definition: Layer.cpp:410
const OutputSlot & GetOutputSlot(unsigned int index=0) const override
Get the const output slot handle by slot index.
Definition: Layer.hpp:339
void VerifyShapeInferenceType(const TensorShape &outputShape, ShapeInferenceMethod shapeInferenceMethod)
Definition: Layer.cpp:526
const char * GetName() const override
Returns the name of the layer.
Definition: Layer.hpp:332
const InputSlot & GetInputSlot(unsigned int index) const override
Get a const input slot handle by slot index.
Definition: Layer.hpp:337
void ValidateAndCopyShape(const TensorShape &outputShape, const TensorShape &inferredShape, const ShapeInferenceMethod shapeInferenceMethod, const std::string &layerName, const unsigned int outputSlotIndex=0)
Definition: Layer.cpp:457
void SetAdditionalInfo(QueueDescriptor &descriptor) const
Definition: Layer.cpp:303
ShapeInferenceMethod m_ShapeInferenceMethod
Definition: Layer.hpp:441
WorkloadInfo PrepInfoAndDesc(QueueDescriptor &descriptor) const
Helper function to reduce duplication in *Layer::CreateWorkload.
TransposeConvolution2dDescriptor m_Param
The parameters for the layer (not including tensor-valued weights etc.).
const TransposeConvolution2dDescriptor & GetParameters() const override
const TensorInfo & GetTensorInfo() const
const void * Map(bool blocking=true)
RAII Managed resource Unmaps MemoryArea once out of scope.
const TensorInfo & GetTensorInfo() const override
Definition: Layer.cpp:100
const TensorShape & GetShape() const
Definition: Tensor.hpp:193
unsigned int GetNumDimensions() const
Function that returns the tensor rank.
Definition: Tensor.cpp:174
This layer represents a 2D transpose convolution operation.
TransposeConvolution2dLayer * Clone(Graph &graph) const override
Creates a dynamically-allocated copy of this layer.
ImmutableConstantTensors GetConstantTensorsByRef() const override
Retrieve the handles to the constant values stored by the layer.
void ExecuteStrategy(IStrategy &strategy) const override
Apply a visitor to this layer.
std::shared_ptr< ConstTensorHandle > m_Weight
A shared pointer to store weight values.
std::vector< TensorShape > InferOutputShapes(const std::vector< TensorShape > &inputShapes) const override
Infers the output shapes from given input shapes and layer properties.
void ValidateTensorShapesFromInputs() override
Check if the input tensor shape(s) will lead to a valid configuration of TransposeConvolution2dLayer.
std::shared_ptr< ConstTensorHandle > m_Bias
A shared pointer to store bias values.
TransposeConvolution2dLayer(const TransposeConvolution2dDescriptor &param, const char *name)
Constructor to create a TransposeConvolution2dLayer.
virtual std::unique_ptr< IWorkload > CreateWorkload(const IWorkloadFactory &factory) const override
Makes a workload for the TransposeConvolution2d type.
Provides access to the appropriate indexes for Channels, Height and Width based on DataLayout.
unsigned int GetWidthIndex() const
unsigned int GetHeightIndex() const
Copyright (c) 2021 ARM Limited and Contributors.
LayerType
When adding a new layer, adapt also the LastLayer enum value in the enum class LayerType below.
Definition: Types.hpp:494
A TransposeConvolution2dDescriptor for the TransposeConvolution2dLayer.
uint32_t m_PadRight
Padding right value in the width dimension.
uint32_t m_PadTop
Padding top value in the height dimension.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
std::vector< unsigned int > m_OutputShape
bool m_OutputShapeEnabled
Output shape if it has been specified.
uint32_t m_PadBottom
Padding bottom value in the height dimension.
uint32_t m_PadLeft
Padding left value in the width dimension.
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
bool m_BiasEnabled
Enable/disable bias.
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.