24.02
|
Go to the documentation of this file.
31 return CloneBase<GatherNdLayer>(graph,
GetName());
49 unsigned int index_depth = indices[indicesDim - 1];
53 std::vector<unsigned int> outer_shape;
54 outer_shape.reserve(indicesDim - 1);
55 for (
unsigned int i = 0; i < indicesDim - 1; ++i)
57 outer_shape.emplace_back(indices[i]);
61 std::vector<unsigned int> inner_shape;
62 inner_shape.reserve(paramsDim - index_depth);
63 for (
unsigned int i = index_depth; i < paramsDim; ++i)
65 inner_shape.emplace_back(params[i]);
69 std::vector<unsigned int> output_shape;
70 output_shape.reserve( outer_shape.size() + inner_shape.size() );
71 output_shape.insert( output_shape.end(), outer_shape.begin(), outer_shape.end() );
72 output_shape.insert( output_shape.end(), inner_shape.begin(), inner_shape.end() );
74 const auto outputDim =
static_cast<unsigned int>(output_shape.size());
75 return std::vector<TensorShape>({
TensorShape({outputDim, output_shape.data()})});
#define ARMNN_ASSERT(COND)
const TensorInfo & GetTensorInfo() const override
void ValidateAndCopyShape(const TensorShape &outputShape, const TensorShape &inferredShape, const ShapeInferenceMethod shapeInferenceMethod, const std::string &layerName, const unsigned int outputSlotIndex=0)
const OutputSlot & GetOutputSlot(unsigned int index=0) const override
Get the const output slot handle by slot index.
virtual std::unique_ptr< IWorkload > CreateWorkload(const IWorkloadFactory &factory) const override
Makes a workload for the GatherNd type.
const InputSlot & GetInputSlot(unsigned int index) const override
Get a const input slot handle by slot index.
const char * GetName() const override
Returns the name of the layer.
unsigned int GetNumDimensions() const
Function that returns the tensor rank.
WorkloadInfo PrepInfoAndDesc(QueueDescriptor &descriptor) const
Helper function to reduce duplication in *Layer::CreateWorkload.
GatherNdLayer * Clone(Graph &graph) const override
Creates a dynamically-allocated copy of this layer.
void VerifyShapeInferenceType(const TensorShape &outputShape, ShapeInferenceMethod shapeInferenceMethod)
GatherNdLayer(const char *name)
Constructor to create a GatherNdLayer.
void SetAdditionalInfo(QueueDescriptor &descriptor) const
std::vector< TensorShape > InferOutputShapes(const std::vector< TensorShape > &inputShapes) const override
Infers the output shapes from given input shapes and layer properties.
const TensorShape & GetShape() const
This layer represents a GatherNd operator.
void ValidateTensorShapesFromInputs() override
Check if the input tensor shape(s) will lead to a valid configuration of GatherNdLayer.
Copyright (c) 2021 ARM Limited and Contributors.
void VerifyLayerConnections(unsigned int expectedConnections, const CheckLocation &location) const
Dimensionality GetDimensionality() const
Function that returns the dimensionality of the tensor.
ShapeInferenceMethod m_ShapeInferenceMethod
LayerType
When adding a new layer, adapt also the LastLayer enum value in the enum class LayerType below.
virtual std::unique_ptr< IWorkload > CreateWorkload(LayerType type, const QueueDescriptor &descriptor, const WorkloadInfo &info) const =0
Backends should implement their own CreateWorkload function with a switch statement.