55 if (layer->m_Param.m_BiasEnabled)
60 return std::move(layer);
64 const std::vector<TensorShape>& inputShapes)
const
66 if (inputShapes.size() != 2)
68 throw armnn::Exception(
"inputShapes' size is \"" + std::to_string(inputShapes.size()) +
69 "\" - should be \"2\".");
82 const unsigned int batches = inputShape[0];
84 const unsigned int wInput = inputShape[dataLayoutIndex.
GetWidthIndex()];
85 const unsigned int hInput = inputShape[dataLayoutIndex.
GetHeightIndex()];
87 const unsigned int wKernel = kernelShape[dataLayoutIndex.
GetWidthIndex()];
88 const unsigned int hKernel = kernelShape[dataLayoutIndex.
GetHeightIndex()];
93 unsigned int wOutput = (wInput - 1) *
m_Param.m_StrideX + wKernel - wPadding;
94 unsigned int hOutput = (hInput - 1) *
m_Param.m_StrideY + hKernel - hPadding;
95 unsigned int cOutput = kernelShape[0];
98 TensorShape( { batches, hOutput, wOutput, cOutput } ) :
99 TensorShape( { batches, cOutput, hOutput, wOutput });
101 return std::vector<TensorShape>({ tensorShape });
117 std::vector<TensorShape> expectedOutputShape;
118 std::vector<TensorShape> outputShapeGivenAsInput;
121 m_Weight->GetTensorInfo().GetShape() });
123 if (expectedOutputShape.size() != 1)
126 + std::to_string(expectedOutputShape.size()) +
127 " - should be \"1\".");
131 if (
m_Param.m_OutputShapeEnabled)
133 TensorShape shapeAsTensorShape(
static_cast<unsigned int>(
m_Param.m_OutputShape.size()),
135 outputShapeGivenAsInput.push_back(shapeAsTensorShape);
137 if (outputShapeGivenAsInput.size() != 1)
140 + std::to_string(outputShapeGivenAsInput.size()) +
141 " - should be \"1\".");
144 if (expectedOutputShape != outputShapeGivenAsInput)
147 "output calculated by InferOutputShapes and the output given "
148 "as an input parameter to the layer are not matching");
164 std::vector<armnn::ConstTensor> constTensors { { managedWeight.
GetTensorInfo(), managedWeight.
Map() } };
A tensor defined by a TensorInfo (shape and data type) and an immutable backing store.
Base class for all ArmNN exceptions so that users can filter to just those.
std::vector< std::reference_wrapper< const std::shared_ptr< ConstTensorHandle > > > ImmutableConstantTensors
virtual void ExecuteStrategy(const IConnectableLayer *layer, const armnn::BaseDescriptor &descriptor, const std::vector< armnn::ConstTensor > &constants, const char *name, const armnn::LayerBindingId id=0)=0
virtual std::unique_ptr< IWorkload > CreateWorkload(LayerType type, const QueueDescriptor &descriptor, const WorkloadInfo &info) const =0
Backends should implement their own CreateWorkload function with a switch statement.
void VerifyLayerConnections(unsigned int expectedConnections, const CheckLocation &location) const
const InputSlot & GetInputSlot(unsigned int index) const override
Get a const input slot handle by slot index.
void VerifyShapeInferenceType(const TensorShape &outputShape, ShapeInferenceMethod shapeInferenceMethod)
const OutputSlot & GetOutputSlot(unsigned int index=0) const override
Get the const output slot handle by slot index.
LayerType * CloneBase(Graph &graph, Params &&... params) const
const char * GetName() const override
Returns the name of the layer.
void ValidateAndCopyShape(const TensorShape &outputShape, const TensorShape &inferredShape, const ShapeInferenceMethod shapeInferenceMethod, const std::string &layerName, const unsigned int outputSlotIndex=0)
void SetAdditionalInfo(QueueDescriptor &descriptor) const
ShapeInferenceMethod m_ShapeInferenceMethod
LayerWithParameters(unsigned int numInputSlots, unsigned int numOutputSlots, LayerType type, const TransposeConvolution2dDescriptor &param, const char *name)
WorkloadInfo PrepInfoAndDesc(QueueDescriptor &descriptor) const
const TransposeConvolution2dDescriptor & GetParameters() const override
TransposeConvolution2dDescriptor m_Param
const void * Map(bool blocking=true)
RAII managed resource; unmaps the MemoryArea once it goes out of scope.
const TensorInfo & GetTensorInfo() const
const TensorInfo & GetTensorInfo() const override
const TensorShape & GetShape() const
unsigned int GetNumDimensions() const
Function that returns the tensor rank.
TransposeConvolution2dLayer * Clone(Graph &graph) const override
Creates a dynamically-allocated copy of this layer.
ImmutableConstantTensors GetConstantTensorsByRef() const override
Retrieve the handles to the constant values stored by the layer.
void ExecuteStrategy(IStrategy &strategy) const override
Apply a visitor to this layer.
std::shared_ptr< ConstTensorHandle > m_Weight
A shared pointer to store weight values.
std::vector< TensorShape > InferOutputShapes(const std::vector< TensorShape > &inputShapes) const override
Infers the output shapes from given input shapes and layer properties.
void ValidateTensorShapesFromInputs() override
Check if the input tensor shape(s) will lead to a valid configuration of TransposeConvolution2dLayer.
std::shared_ptr< ConstTensorHandle > m_Bias
A shared pointer to store bias values.
TransposeConvolution2dLayer(const TransposeConvolution2dDescriptor &param, const char *name)
Constructor to create a TransposeConvolution2dLayer.
virtual std::unique_ptr< IWorkload > CreateWorkload(const IWorkloadFactory &factory) const override
Makes a workload for the TransposeConvolution2d type.
Provides access to the appropriate indexes for Channels, Height and Width based on DataLayout.
unsigned int GetWidthIndex() const
unsigned int GetHeightIndex() const
Copyright (c) 2021 ARM Limited and Contributors.
LayerType
When adding a new layer, adapt also the LastLayer enum value in the enum class LayerType below.
A TransposeConvolution2dDescriptor for the TransposeConvolution2dLayer.
const ConstTensorHandle * m_Bias
const ConstTensorHandle * m_Weight