ArmNN 24.08
StridedSliceLayer.cpp
1 //
2 // Copyright © 2018-2024 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 // Copyright © 2018 The TensorFlow Authors. All Rights Reserved.
6 // SPDX-License-Identifier: Apache-2.0
7 //
8 #include "StridedSliceLayer.hpp"
9 
10 #include "LayerCloneBase.hpp"
11 
12 #include <armnn/utility/NumericCast.hpp>
13 
14 #include <armnn/backends/WorkloadData.hpp>
15 #include <armnn/backends/WorkloadFactory.hpp>
16 
17 namespace armnn
18 {
19 
20 StridedSliceLayer::StridedSliceLayer(const StridedSliceDescriptor& param, const char* name)
21  : LayerWithParameters(1, 1, LayerType::StridedSlice, param, name)
22 {
23 }
24 
25 std::unique_ptr<IWorkload> StridedSliceLayer::CreateWorkload(const IWorkloadFactory& factory) const
26 {
27  StridedSliceQueueDescriptor descriptor;
28 
29  descriptor.m_Parameters.m_Begin = m_Param.m_Begin;
30  descriptor.m_Parameters.m_End = m_Param.m_End;
31  descriptor.m_Parameters.m_Stride = m_Param.m_Stride;
32 
33  // Optional parameters
34  descriptor.m_Parameters.m_BeginMask = m_Param.m_BeginMask;
35  descriptor.m_Parameters.m_EndMask = m_Param.m_EndMask;
36  descriptor.m_Parameters.m_EllipsisMask = m_Param.m_EllipsisMask;
37  descriptor.m_Parameters.m_NewAxisMask = m_Param.m_NewAxisMask;
38  descriptor.m_Parameters.m_ShrinkAxisMask = m_Param.m_ShrinkAxisMask;
39 
40  SetAdditionalInfo(descriptor);
41 
42  return factory.CreateWorkload(LayerType::StridedSlice, descriptor, PrepInfoAndDesc(descriptor));
43 }
44 
45 StridedSliceLayer* StridedSliceLayer::Clone(Graph& graph) const
46 {
47  return CloneBase<StridedSliceLayer>(graph, m_Param, GetName());
48 }
49 
50 // Content in this function (fixes related to NewAxisMask and EllipsisMask) is paraphrased from:
51 // tensorflow/tensorflow/lite/kernels/strided_slice.cc from the function BuildStridedSliceParams
52 std::vector<TensorShape> StridedSliceLayer::InferOutputShapes(
53  const std::vector<TensorShape>& inputShapes) const
54 {
55  if (inputShapes.size() != 1)
56  {
57  throw armnn::Exception("inputShapes' size is \"" + std::to_string(inputShapes.size()) +
58  "\" - should be \"1\".");
59  }
60 
61  TensorShape inputShape = inputShapes[0];
62  std::vector<unsigned int> outputShape;
63  unsigned int amountDimShrunk{0};
64 
65  // Getting the actual number of output dimensions, including axes added with the NewAxisMask
66  unsigned int outputDims = inputShape.GetNumDimensions();
67  for(unsigned int i = 0; i < m_Param.m_Begin.size(); ++i)
68  {
69  // Adding to dimension count for every set bit of NewAxisMask not covered by the EllipsisMask
70  if(m_Param.m_NewAxisMask & (1 << i) && !(m_Param.m_EllipsisMask & (1 << i)))
71  {
72  ++outputDims;
73  }
74  }
75 
76  // Modifying the EllipsisMask based on the NewAxisMask (expand for any newly added axes)
77  // and the NewAxisMask based on the EllipsisMask (offset based on the expanded ellipsis)
78  int realEllipsisMask = 0, realNewAxisMask = 0;
79  // The number of bits the ellipsis mask was expanded by
80  unsigned int ellipsisExpandedBy = 0;
81  for(unsigned int i = 0; i < outputDims; ++i)
82  {
83  if(m_Param.m_EllipsisMask & (1 << i))
84  {
85  // The end index of the expanded ellipsis mask (start is at i)
86  // End Index calculation - i+1 (for non-expanded ellipsis) + outputDims-inputDims (number of added dims)
87  unsigned int endIdx = std::min(i + 1u + outputDims - inputShape.GetNumDimensions(), outputDims);
88 
89  // Calculation: the total size of the mask -1 for the already existing bit in the original mask
90  ellipsisExpandedBy = endIdx - i - 1;
91 
92  // Setting mask bit to 1 for the entire expanded ellipsis
93  for(; i < endIdx; ++i)
94  {
95  realEllipsisMask |= (1 << i);
96  }
97  }
98 
99  // Setting the real NewAxisMask based on the expanded ellipsis size
100  if(m_Param.m_NewAxisMask & (1 << (i - ellipsisExpandedBy)))
101  {
102  realNewAxisMask |= (1 << i);
103  }
104  }
105 
106  // The backwards offset by which i is ahead of the actual inputTensor dimension
107  unsigned int inputDimOffset = 0;
108  // Iterating through the parameters and inferring output shape
109  for (unsigned int i = 0; i < outputDims; ++i)
110  {
111  // Add entire dimension if EllipsisMask is set
112  if(realEllipsisMask & (1 << i))
113  {
114  outputShape.push_back(inputShape[i - inputDimOffset]);
115  continue;
116  }
117  // Add dimension of length 1 if NewAxisMask is set
118  if(realNewAxisMask & (1 << i))
119  {
120  outputShape.push_back(1);
121  ++inputDimOffset;
122  continue;
123  }
124  // Fill the rest of the inferred shape (dimensions greater than the input shape)
125  if(i >= inputShape.GetNumDimensions())
126  {
127  // If EllipsisMask was set at any point, the TensorFlow behavior is to fill the rest of the tensor with 1
128  // Otherwise, the remaining dimensions from the inputShape (which were skipped over) are used
129  if(realEllipsisMask > 0)
130  {
131  outputShape.push_back(1);
132  }
133  else
134  {
135  outputShape.push_back(inputShape[i - inputDimOffset]);
136  }
137  continue;
138  }
139 
140  int stride = m_Param.m_Stride[i];
141  int start = m_Param.GetStartForAxis(inputShape, i);
142  int stop = m_Param.GetStopForAxis(inputShape, i, start);
143 
144  if (m_Param.m_ShrinkAxisMask & (1 << i))
145  {
146  amountDimShrunk+=1;
147 
148  // If the difference between the start point and the end point of the slice on an axis being shrunk
149  // is greater than 1 then throw an error as the output will not be large enough to hold the slice
150  if (((m_Param.m_Begin[i] - m_Param.m_End[i]) > 1) || ((m_Param.m_Begin[i] - m_Param.m_End[i]) < -1))
151  {
152  throw LayerValidationException(
153  "StridedSlice: Attempting to take a larger slice than can fit in inferred output");
154  }
155 
156  if (stride < 0)
157  {
158  throw LayerValidationException(
159  "StridedSlice: Stride can not be negative with Shrink Axis Mask set.");
160  }
161  continue;
162  }
163 
164  int newSize = stride > 0 ? ((stop - start) + stride - 1) / stride :
165  ((start - stop) - stride - 1) / -stride;
166 
167  // Making sure the dimension size doesn't go out of bounds
168  newSize = std::max(0, newSize);
169  newSize = std::min(newSize, armnn::numeric_cast<int>(inputShape[i - inputDimOffset]));
170 
171  outputShape.push_back(armnn::numeric_cast<unsigned int>(newSize));
172  }
173 
174  if (outputShape.size() == 0 && (inputShape.GetNumDimensions() - amountDimShrunk) == 0)
175  {
176  outputShape.push_back(1);
177  }
178 
179  return std::vector<TensorShape>({
180  TensorShape(armnn::numeric_cast<unsigned int>(outputShape.size()), &outputShape[0]) });
181 }
182 
183 void StridedSliceLayer::ValidateTensorShapesFromInputs()
184 {
185  VerifyLayerConnections(1, CHECK_LOCATION());
186 
187  const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
188 
189  VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
190 
191  auto inferredShapes = InferOutputShapes({GetInputSlot(0).GetTensorInfo().GetShape()});
192 
193  if (inferredShapes.size() != 1)
194  {
195  throw armnn::LayerValidationException("inferredShapes has "
196  + std::to_string(inferredShapes.size()) +
197  " elements - should only have 1.");
198  }
199 
200  ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "StridedSliceLayer");
201 }
202 
203 void StridedSliceLayer::ExecuteStrategy(IStrategy& strategy) const
204 {
205  strategy.ExecuteStrategy(this, GetParameters(), {}, GetName());
206 }
207 
208 } // namespace armnn
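The per-axis output size in InferOutputShapes (lines 164-169 of the listing) is a ceiling division of the slice extent by the stride, mirrored for negative strides and then clamped to the input dimension. The snippet below is a minimal standalone sketch, not part of the ArmNN sources, that reproduces only that arithmetic in a hypothetical helper named SliceSizeForAxis so the formula can be checked with concrete numbers.

#include <algorithm>
#include <iostream>

// Hypothetical helper mirroring the per-axis size calculation in InferOutputShapes:
// ceil((stop - start) / stride) for positive strides, the mirrored form for negative
// strides, then clamped to the range [0, dimSize].
int SliceSizeForAxis(int start, int stop, int stride, int dimSize)
{
    int newSize = stride > 0 ? ((stop - start) + stride - 1) / stride
                             : ((start - stop) - stride - 1) / -stride;
    newSize = std::max(0, newSize);
    return std::min(newSize, dimSize);
}

int main()
{
    std::cout << SliceSizeForAxis(0, 10,  2, 10) << "\n"; // 5: elements 0,2,4,6,8
    std::cout << SliceSizeForAxis(1,  4,  1, 10) << "\n"; // 3: elements 1,2,3
    std::cout << SliceSizeForAxis(9, -1, -3, 10) << "\n"; // 4: elements 9,6,3,0
    return 0;
}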
armnn::OutputSlot::GetTensorInfo
const TensorInfo & GetTensorInfo() const override
Definition: Layer.cpp:100
WorkloadData.hpp
armnn::StridedSliceDescriptor::m_Begin
std::vector< int > m_Begin
Begin values for the input that will be sliced.
Definition: Descriptors.hpp:1342
armnn::StridedSliceDescriptor::m_EllipsisMask
int32_t m_EllipsisMask
Ellipsis mask value.
Definition: Descriptors.hpp:1357
CHECK_LOCATION
#define CHECK_LOCATION()
Definition: Exceptions.hpp:203
armnn::LayerType::StridedSlice
@ StridedSlice
armnn::Layer::ValidateAndCopyShape
void ValidateAndCopyShape(const TensorShape &outputShape, const TensorShape &inferredShape, const ShapeInferenceMethod shapeInferenceMethod, const std::string &layerName, const unsigned int outputSlotIndex=0)
Definition: Layer.cpp:457
armnn::Layer::GetOutputSlot
const OutputSlot & GetOutputSlot(unsigned int index=0) const override
Get the const output slot handle by slot index.
Definition: Layer.hpp:339
armnn::StridedSliceDescriptor::m_BeginMask
int32_t m_BeginMask
Begin mask value.
Definition: Descriptors.hpp:1350
armnn::IStrategy
Definition: IStrategy.hpp:16
armnn::Layer::GetInputSlot
const InputSlot & GetInputSlot(unsigned int index) const override
Get a const input slot handle by slot index.
Definition: Layer.hpp:337
armnn::LayerWithParameters< StridedSliceDescriptor >::GetParameters
const StridedSliceDescriptor & GetParameters() const override
Definition: LayerWithParameters.hpp:19
WorkloadFactory.hpp
NumericCast.hpp
armnn::LayerWithParameters
Definition: LayerWithParameters.hpp:14
armnn::StridedSliceLayer::Clone
StridedSliceLayer * Clone(Graph &graph) const override
Creates a dynamically-allocated copy of this layer.
Definition: StridedSliceLayer.cpp:45
armnn::Layer::GetName
const char * GetName() const override
Returns the name of the layer.
Definition: Layer.hpp:332
armnn::StridedSliceDescriptor::GetStopForAxis
int GetStopForAxis(const TensorShape &inputShape, unsigned int axis, int startForAxis) const
Definition: Descriptors.cpp:420
armnn::InputSlot::GetTensorInfo
const TensorInfo & GetTensorInfo() const override
Gets the TensorInfo for this InputSlot.
Definition: Layer.cpp:614
armnn::TensorShape
Definition: Tensor.hpp:20
armnn::LayerWithParameters< StridedSliceDescriptor >::m_Param
StridedSliceDescriptor m_Param
The parameters for the layer (not including tensor-valued weights etc.).
Definition: LayerWithParameters.hpp:52
armnn::QueueDescriptorWithParameters::m_Parameters
LayerDescriptor m_Parameters
Definition: WorkloadData.hpp:66
armnn::TensorShape::GetNumDimensions
unsigned int GetNumDimensions() const
Function that returns the tensor rank.
Definition: Tensor.cpp:174
armnn::StridedSliceLayer::ValidateTensorShapesFromInputs
void ValidateTensorShapesFromInputs() override
Check if the input tensor shape(s) will lead to a valid configuration of StridedSliceLayer.
Definition: StridedSliceLayer.cpp:183
armnn::StridedSliceQueueDescriptor
Definition: WorkloadData.hpp:467
armnn::LayerWithParameters< StridedSliceDescriptor >::PrepInfoAndDesc
WorkloadInfo PrepInfoAndDesc(QueueDescriptor &descriptor) const
Helper function to reduce duplication in *Layer::CreateWorkload.
Definition: LayerWithParameters.hpp:44
armnn::LayerValidationException
Definition: Exceptions.hpp:105
armnn::IWorkloadFactory
Definition: WorkloadFactory.hpp:22
armnn::Layer::VerifyShapeInferenceType
void VerifyShapeInferenceType(const TensorShape &outputShape, ShapeInferenceMethod shapeInferenceMethod)
Definition: Layer.cpp:526
armnn::Layer::SetAdditionalInfo
void SetAdditionalInfo(QueueDescriptor &descriptor) const
Definition: Layer.cpp:303
armnn::Exception
Base class for all ArmNN exceptions so that users can filter to just those.
Definition: Exceptions.hpp:46
armnn::StridedSliceDescriptor::m_EndMask
int32_t m_EndMask
End mask value.
Definition: Descriptors.hpp:1353
armnn::StridedSliceDescriptor::m_ShrinkAxisMask
int32_t m_ShrinkAxisMask
Shrink axis mask value. If set, the nth specification shrinks the dimensionality by 1.
Definition: Descriptors.hpp:1355
armnn::StridedSliceLayer
This layer represents a strided slice operation.
Definition: StridedSliceLayer.hpp:13
armnn::StridedSliceDescriptor::m_Stride
std::vector< int > m_Stride
Stride values for the input that will be sliced.
Definition: Descriptors.hpp:1346
armnn::StridedSliceDescriptor
A StridedSliceDescriptor for the StridedSliceLayer.
Definition: Descriptors.hpp:1303
armnn::StridedSliceLayer::CreateWorkload
virtual std::unique_ptr< IWorkload > CreateWorkload(const IWorkloadFactory &factory) const override
Makes a workload for the StridedSlice type.
Definition: StridedSliceLayer.cpp:25
armnn::TensorInfo::GetShape
const TensorShape & GetShape() const
Definition: Tensor.hpp:193
armnn::StridedSliceDescriptor::GetStartForAxis
int GetStartForAxis(const TensorShape &inputShape, unsigned int axis) const
Definition: Descriptors.cpp:393
armnn
Copyright (c) 2021 ARM Limited and Contributors.
Definition: 01_00_quick_start.dox:6
armnn::StridedSlice
void StridedSlice(const TensorInfo &inputInfo, const StridedSliceDescriptor &params, const void *inputData, void *outputData, unsigned int dataTypeSize)
Definition: StridedSlice.cpp:86
armnn::StridedSliceDescriptor::m_End
std::vector< int > m_End
End values for the input that will be sliced.
Definition: Descriptors.hpp:1344
StridedSliceLayer.hpp
armnn::Layer::VerifyLayerConnections
void VerifyLayerConnections(unsigned int expectedConnections, const CheckLocation &location) const
Definition: Layer.cpp:410
armnn::StridedSliceLayer::StridedSliceLayer
StridedSliceLayer(const StridedSliceDescriptor &param, const char *name)
Constructor to create a StridedSliceLayer.
Definition: StridedSliceLayer.cpp:20
armnn::StridedSliceLayer::InferOutputShapes
std::vector< TensorShape > InferOutputShapes(const std::vector< TensorShape > &inputShapes) const override
By default returns inputShapes if the number of inputs are equal to number of outputs,...
Definition: StridedSliceLayer.cpp:52
armnn::StridedSliceLayer::ExecuteStrategy
void ExecuteStrategy(IStrategy &strategy) const override
Apply a visitor to this layer.
Definition: StridedSliceLayer.cpp:203
armnn::Layer::m_ShapeInferenceMethod
ShapeInferenceMethod m_ShapeInferenceMethod
Definition: Layer.hpp:441
armnn::LayerType
LayerType
When adding a new layer, adapt also the LastLayer enum value in the enum class LayerType below.
Definition: Types.hpp:491
armnn::StridedSliceDescriptor::m_NewAxisMask
int32_t m_NewAxisMask
New axis mask value.
Definition: Descriptors.hpp:1360
armnn::Graph
Definition: Graph.hpp:30
armnn::IWorkloadFactory::CreateWorkload
virtual std::unique_ptr< IWorkload > CreateWorkload(LayerType type, const QueueDescriptor &descriptor, const WorkloadInfo &info) const =0
Backends should implement their own CreateWorkload function with a switch statement.
armnn::IStrategy::ExecuteStrategy
virtual void ExecuteStrategy(const IConnectableLayer *layer, const armnn::BaseDescriptor &descriptor, const std::vector< armnn::ConstTensor > &constants, const char *name, const armnn::LayerBindingId id=0)=0
LayerCloneBase.hpp
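For reference, the snippet below is a hedged sketch of how the StridedSliceDescriptor fields indexed above fit together when configuring a strided slice. The begin/end/stride values and the mask settings are illustrative assumptions; only the field names documented on this page (m_Begin, m_End, m_Stride and the mask members) are used.

#include <armnn/Descriptors.hpp>

// Illustrative only: slice a [1,2,3] begin/end window with unit stride and drop axis 0.
armnn::StridedSliceDescriptor MakeExampleDescriptor()
{
    armnn::StridedSliceDescriptor desc;
    desc.m_Begin  = {0, 0, 0};      // per-axis start indices (see m_Begin above)
    desc.m_End    = {1, 2, 3};      // per-axis exclusive stop indices (see m_End above)
    desc.m_Stride = {1, 1, 1};      // per-axis step (see m_Stride above)
    desc.m_ShrinkAxisMask = 1 << 0; // shrink axis 0, reducing the output rank by one
    desc.m_NewAxisMask    = 0;      // no inserted axes
    desc.m_EllipsisMask   = 0;      // no ellipsis expansion
    return desc;
}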