Arm NN 24.08 — GatherNdLayer.cpp file reference. Go to the documentation of this file.
31 return CloneBase<GatherNdLayer>(graph,
GetName());
36 if (inputShapes.size() != 2)
38 throw armnn::Exception(
"inputShapes' size is \"" + std::to_string(inputShapes.size()) +
39 "\" - should be \"2\".");
54 unsigned int index_depth = indices[indicesDim - 1];
55 if (index_depth > paramsDim)
58 + std::to_string(index_depth) +
60 + std::to_string(paramsDim) +
"\")");
64 std::vector<unsigned int> outer_shape;
65 outer_shape.reserve(indicesDim - 1);
66 for (
unsigned int i = 0; i < indicesDim - 1; ++i)
68 outer_shape.emplace_back(indices[i]);
72 std::vector<unsigned int> inner_shape;
73 inner_shape.reserve(paramsDim - index_depth);
74 for (
unsigned int i = index_depth; i < paramsDim; ++i)
76 inner_shape.emplace_back(params[i]);
80 std::vector<unsigned int> output_shape;
81 output_shape.reserve( outer_shape.size() + inner_shape.size() );
82 output_shape.insert( output_shape.end(), outer_shape.begin(), outer_shape.end() );
83 output_shape.insert( output_shape.end(), inner_shape.begin(), inner_shape.end() );
85 const auto outputDim =
static_cast<unsigned int>(output_shape.size());
86 return std::vector<TensorShape>({
TensorShape({outputDim, output_shape.data()})});
101 if (inferredShapes.size() != 1)
104 + std::to_string(inferredShapes.size()) +
105 " elements - should only have 1.");
const TensorInfo & GetTensorInfo() const override
void ValidateAndCopyShape(const TensorShape &outputShape, const TensorShape &inferredShape, const ShapeInferenceMethod shapeInferenceMethod, const std::string &layerName, const unsigned int outputSlotIndex=0)
const OutputSlot & GetOutputSlot(unsigned int index=0) const override
Get the const output slot handle by slot index.
virtual std::unique_ptr< IWorkload > CreateWorkload(const IWorkloadFactory &factory) const override
Makes a workload for the Gather type.
const InputSlot & GetInputSlot(unsigned int index) const override
Get a const input slot handle by slot index.
const char * GetName() const override
Returns the name of the layer.
unsigned int GetNumDimensions() const
Function that returns the tensor rank.
WorkloadInfo PrepInfoAndDesc(QueueDescriptor &descriptor) const
Helper function to reduce duplication in *Layer::CreateWorkload.
GatherNdLayer * Clone(Graph &graph) const override
Creates a dynamically-allocated copy of this layer.
void VerifyShapeInferenceType(const TensorShape &outputShape, ShapeInferenceMethod shapeInferenceMethod)
GatherNdLayer(const char *name)
Constructor to create a GatherNdLayer.
void SetAdditionalInfo(QueueDescriptor &descriptor) const
Base class for all ArmNN exceptions so that users can filter to just those.
std::vector< TensorShape > InferOutputShapes(const std::vector< TensorShape > &inputShapes) const override
Infers the output shapes from given input shapes and layer properties.
const TensorShape & GetShape() const
This layer represents a GatherNd operator.
void ValidateTensorShapesFromInputs() override
Check if the input tensor shape(s) will lead to a valid configuration of GatherNdLayer.
Copyright (c) 2021 ARM Limited and Contributors.
void VerifyLayerConnections(unsigned int expectedConnections, const CheckLocation &location) const
Dimensionality GetDimensionality() const
Function that returns the tensor dimensionality.
ShapeInferenceMethod m_ShapeInferenceMethod
LayerType
When adding a new layer, adapt also the LastLayer enum value in the enum class LayerType below.
virtual std::unique_ptr< IWorkload > CreateWorkload(LayerType type, const QueueDescriptor &descriptor, const WorkloadInfo &info) const =0
Backends should implement their own CreateWorkload function with a switch statement.