ArmNN 25.02
SpaceToBatchNdLayer.cpp
//
// Copyright © 2018-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "SpaceToBatchNdLayer.hpp"
#include "LayerCloneBase.hpp"

#include <armnn/backends/WorkloadData.hpp>
#include <armnn/backends/WorkloadFactory.hpp>

#include <numeric>

using namespace armnnUtils;

namespace armnn
{

SpaceToBatchNdLayer::SpaceToBatchNdLayer(const SpaceToBatchNdDescriptor param, const char* name)
    : LayerWithParameters(1, 1, LayerType::SpaceToBatchNd, param, name)
{}

std::unique_ptr<IWorkload> SpaceToBatchNdLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
    SpaceToBatchNdQueueDescriptor descriptor;
    descriptor.m_Parameters.m_BlockShape = m_Param.m_BlockShape;
    descriptor.m_Parameters.m_PadList = m_Param.m_PadList;
    SetAdditionalInfo(descriptor);

    return factory.CreateWorkload(LayerType::SpaceToBatchNd, descriptor, PrepInfoAndDesc(descriptor));
}

SpaceToBatchNdLayer* SpaceToBatchNdLayer::Clone(Graph& graph) const
{
    IgnoreUnused(graph);
    return CloneBase<SpaceToBatchNdLayer>(graph, m_Param, GetName());
}

std::vector<TensorShape> SpaceToBatchNdLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
{
    const TensorShape inputShape = inputShapes[0];
    TensorShape outputShape(inputShape);

    outputShape[0] = inputShape[0] * std::accumulate(m_Param.m_BlockShape.begin(),
                                                     m_Param.m_BlockShape.end(),
                                                     1U,
                                                     std::multiplies<>());

    // In a 4D tensor there are 2 spatial dimensions (H and W), so the for loop runs twice.
    // In a 3D tensor there is 1 spatial dimension, so the for loop runs once.
    unsigned int firstSpatialDimension = m_Param.m_DataLayout == DataLayout::NCHW ? 2 : 1;
    for (unsigned int i = 0; i < m_Param.m_BlockShape.size(); ++i)
    {
        unsigned int spatialDimension = firstSpatialDimension + i;
        outputShape[spatialDimension] =
            (inputShape[spatialDimension] + m_Param.m_PadList[i].first + m_Param.m_PadList[i].second)
            / m_Param.m_BlockShape[i];
    }

    return std::vector<TensorShape>({ outputShape });
}

void SpaceToBatchNdLayer::ValidateTensorShapesFromInputs()
{
    VerifyLayerConnections(1, CHECK_LOCATION());

    const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();

    VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);

    std::vector<TensorShape> inferredShapes = InferOutputShapes({
        GetInputSlot(0).GetTensorInfo().GetShape() });

    if (inferredShapes.size() != 1)
    {
        throw armnn::LayerValidationException("inferredShapes has "
                                              + std::to_string(inferredShapes.size()) +
                                              " elements - should only have 1.");
    }

    ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "SpaceToBatchNdLayer");
}

void SpaceToBatchNdLayer::ExecuteStrategy(IStrategy& strategy) const
{
    strategy.ExecuteStrategy(this, GetParameters(), {}, GetName());
}

} // namespace armnn
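
To make the shape arithmetic in InferOutputShapes above concrete, here is a minimal standalone sketch. It does not use the ArmNN API; it simply repeats the same computation with plain standard-library types for a hypothetical NHWC input of shape [1, 4, 6, 3], block shape [2, 3] and zero padding (these example values are illustrative, not taken from the file above).

// Standalone illustration of the output-shape computation performed by
// SpaceToBatchNdLayer::InferOutputShapes; the example shapes are hypothetical.
#include <cstdio>
#include <functional>
#include <numeric>
#include <utility>
#include <vector>

int main()
{
    std::vector<unsigned int> inputShape = { 1, 4, 6, 3 };   // N, H, W, C (NHWC)
    std::vector<unsigned int> blockShape = { 2, 3 };          // block sizes for H and W
    std::vector<std::pair<unsigned int, unsigned int>> padList = { { 0, 0 }, { 0, 0 } };

    std::vector<unsigned int> outputShape = inputShape;

    // The batch dimension is multiplied by the product of the block shape: 1 * (2 * 3) = 6.
    outputShape[0] = inputShape[0] * std::accumulate(blockShape.begin(), blockShape.end(),
                                                     1U, std::multiplies<>());

    // NHWC, so the first spatial dimension is index 1 (it would be 2 for NCHW).
    unsigned int firstSpatialDimension = 1;
    for (unsigned int i = 0; i < blockShape.size(); ++i)
    {
        unsigned int d = firstSpatialDimension + i;
        // Each spatial dimension becomes (size + padding) / block: H: (4 + 0) / 2 = 2, W: (6 + 0) / 3 = 2.
        outputShape[d] = (inputShape[d] + padList[i].first + padList[i].second) / blockShape[i];
    }

    // Prints "output shape: 6 2 2 3"; the element count is preserved (1*4*6*3 == 6*2*2*3 == 72).
    std::printf("output shape: %u %u %u %u\n", outputShape[0], outputShape[1], outputShape[2], outputShape[3]);
    return 0;
}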
Cross-references for the declarations used above (signatures, brief descriptions and definition locations from the linked Doxygen pages):

- CHECK_LOCATION: #define CHECK_LOCATION(). Definition: Exceptions.hpp:203.
- IStrategy::ExecuteStrategy: virtual void ExecuteStrategy(const IConnectableLayer* layer, const armnn::BaseDescriptor& descriptor, const std::vector<armnn::ConstTensor>& constants, const char* name, const armnn::LayerBindingId id = 0) = 0.
- IWorkloadFactory::CreateWorkload: virtual std::unique_ptr<IWorkload> CreateWorkload(LayerType type, const QueueDescriptor& descriptor, const WorkloadInfo& info) const = 0. Backends should implement their own CreateWorkload function with a switch statement.
- InputSlot::GetTensorInfo: const TensorInfo& GetTensorInfo() const override. Gets the TensorInfo for this InputSlot. Definition: Layer.cpp:614.
- Layer::VerifyLayerConnections: void VerifyLayerConnections(unsigned int expectedConnections, const CheckLocation& location) const. Definition: Layer.cpp:410.
- Layer::GetOutputSlot: const OutputSlot& GetOutputSlot(unsigned int index = 0) const override. Get the const output slot handle by slot index. Definition: Layer.hpp:339.
- Layer::VerifyShapeInferenceType: void VerifyShapeInferenceType(const TensorShape& outputShape, ShapeInferenceMethod shapeInferenceMethod). Definition: Layer.cpp:526.
- Layer::GetName: const char* GetName() const override. Returns the name of the layer. Definition: Layer.hpp:332.
- Layer::GetInputSlot: const InputSlot& GetInputSlot(unsigned int index) const override. Get a const input slot handle by slot index. Definition: Layer.hpp:337.
- Layer::ValidateAndCopyShape: void ValidateAndCopyShape(const TensorShape& outputShape, const TensorShape& inferredShape, const ShapeInferenceMethod shapeInferenceMethod, const std::string& layerName, const unsigned int outputSlotIndex = 0). Definition: Layer.cpp:457.
- Layer::SetAdditionalInfo: void SetAdditionalInfo(QueueDescriptor& descriptor) const. Definition: Layer.cpp:303.
- Layer::m_ShapeInferenceMethod: ShapeInferenceMethod m_ShapeInferenceMethod. Definition: Layer.hpp:441.
- LayerWithParameters::PrepInfoAndDesc: WorkloadInfo PrepInfoAndDesc(QueueDescriptor& descriptor) const. Helper function to reduce duplication in *Layer::CreateWorkload.
- LayerWithParameters::m_Param: SpaceToBatchNdDescriptor m_Param. The parameters for the layer (not including tensor-valued weights etc.).
- LayerWithParameters::GetParameters: const SpaceToBatchNdDescriptor& GetParameters() const override.
- OutputSlot::GetTensorInfo: const TensorInfo& GetTensorInfo() const override. Definition: Layer.cpp:100.
- SpaceToBatchNdLayer: this layer represents a SpaceToBatchNd operation.
- SpaceToBatchNdLayer::ExecuteStrategy: void ExecuteStrategy(IStrategy& strategy) const override. Apply a visitor to this layer.
- SpaceToBatchNdLayer::InferOutputShapes: std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override. By default returns inputShapes if the number of inputs are equal to number of outputs, ...
- SpaceToBatchNdLayer::SpaceToBatchNdLayer: SpaceToBatchNdLayer(const SpaceToBatchNdDescriptor param, const char* name). Constructor to create a SpaceToBatchNdLayer.
- SpaceToBatchNdLayer::ValidateTensorShapesFromInputs: void ValidateTensorShapesFromInputs() override. Check if the input tensor shape(s) will lead to a valid configuration of SpaceToBatchNdLayer.
- SpaceToBatchNdLayer::Clone: SpaceToBatchNdLayer* Clone(Graph& graph) const override. Creates a dynamically-allocated copy of this layer.
- SpaceToBatchNdLayer::CreateWorkload: virtual std::unique_ptr<IWorkload> CreateWorkload(const IWorkloadFactory& factory) const override. Makes a workload for the SpaceToBatchNd type.
- TensorInfo::GetShape: const TensorShape& GetShape() const. Definition: Tensor.hpp:193.
- armnn (namespace): Copyright (c) 2021 ARM Limited and Contributors.
- armnn::IgnoreUnused: void IgnoreUnused(Ts&&...).
- armnn::SpaceToBatchNd: void SpaceToBatchNd(const TensorInfo& inputInfo, const TensorInfo& outputInfo, const SpaceToBatchNdDescriptor& params, Decoder<float>& inputData, Encoder<float>& outputData).
- armnn::LayerType: when adding a new layer, adapt also the LastLayer enum value in the enum class LayerType. Definition: Types.hpp:494.
- SpaceToBatchNdDescriptor: a SpaceToBatchNdDescriptor for the SpaceToBatchNdLayer.
- SpaceToBatchNdDescriptor::m_BlockShape: std::vector<unsigned int> m_BlockShape. Block shape value.
- SpaceToBatchNdDescriptor::m_DataLayout: DataLayout m_DataLayout. The data layout to be used (NCHW, NHWC).
- SpaceToBatchNdDescriptor::m_PadList: std::vector<std::pair<unsigned int, unsigned int>> m_PadList. Specifies the padding values for the input dimension: heightPad{top, bottom} widthPad{left, ...
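
For context on how these descriptor fields are typically filled in by a caller, the following is a minimal sketch, assuming the public ArmNN graph-building API (INetwork::AddSpaceToBatchNdLayer); the layer names, tensor shapes and binding ids are illustrative only, and backend selection, optimization and execution are omitted.

// Minimal sketch, assuming the public ArmNN INetwork API; names, shapes and
// binding ids are illustrative, not taken from the documentation above.
#include <armnn/Descriptors.hpp>
#include <armnn/INetwork.hpp>
#include <armnn/Tensor.hpp>
#include <armnn/Types.hpp>

int main()
{
    using namespace armnn;

    // Block shape [2, 2], zero padding, NHWC layout: a [1, 4, 4, 1] input
    // becomes a [4, 2, 2, 1] output (batch multiplied by 2*2, H and W halved).
    SpaceToBatchNdDescriptor desc;
    desc.m_BlockShape = { 2, 2 };
    desc.m_PadList    = { { 0, 0 }, { 0, 0 } };
    desc.m_DataLayout = DataLayout::NHWC;

    INetworkPtr network = INetwork::Create();

    IConnectableLayer* input        = network->AddInputLayer(0, "input");
    IConnectableLayer* spaceToBatch = network->AddSpaceToBatchNdLayer(desc, "spaceToBatchNd");
    IConnectableLayer* output       = network->AddOutputLayer(0, "output");

    input->GetOutputSlot(0).Connect(spaceToBatch->GetInputSlot(0));
    spaceToBatch->GetOutputSlot(0).Connect(output->GetInputSlot(0));

    input->GetOutputSlot(0).SetTensorInfo(TensorInfo(TensorShape({ 1, 4, 4, 1 }), DataType::Float32));
    spaceToBatch->GetOutputSlot(0).SetTensorInfo(TensorInfo(TensorShape({ 4, 2, 2, 1 }), DataType::Float32));

    return 0;
}

During network optimization, ValidateTensorShapesFromInputs and InferOutputShapes shown above are used to check that the descriptor and the connected input shape are consistent with the declared output shape.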