std::vector<TensorShape> GatherLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
{
    if (inputShapes.size() != 2)
    {
        throw armnn::Exception("inputShapes' size is \"" + std::to_string(inputShapes.size()) +
                               "\" - should be \"2\".");
    }

    const TensorShape& params  = inputShapes[0];
    const TensorShape& indices = inputShapes[1];

    const unsigned int paramsDim  = params.GetNumDimensions();
    const unsigned int indicesDim = indices.GetNumDimensions();
    const unsigned int outputDim  = paramsDim - 1 + indicesDim;

    std::vector<unsigned int> dimSizes;

    // Resolve the gather axis; a negative axis counts back from the last params dimension.
    unsigned int axis = static_cast<unsigned int>(m_Param.m_Axis);
    if (m_Param.m_Axis < 0)
    {
        int32_t axis_aux = static_cast<int32_t>(paramsDim) + m_Param.m_Axis;
        axis = static_cast<unsigned int>(axis_aux);
    }

    // Output shape: params dims before the axis, then all indices dims, then params dims after the axis.
    for (unsigned int i = 0; i < axis; ++i)
    {
        dimSizes.push_back(params[i]);
    }
    for (unsigned int i = axis; i < indicesDim + axis; ++i)
    {
        dimSizes.push_back(indices[i - axis]);
    }
    for (unsigned int i = 1 + axis; i < paramsDim; ++i)
    {
        dimSizes.push_back(params[i]);
    }

    return std::vector<TensorShape>({TensorShape({outputDim, dimSizes.data()})});
}
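To make the shape arithmetic concrete, here is a small, self-contained sketch that mirrors the three loops above using plain std::vector instead of armnn::TensorShape; the function name InferGatherShape and the example shapes are illustrative only, not part of ArmNN.

// Standalone sketch of the same shape arithmetic using plain vectors instead of
// armnn::TensorShape; the function name and example shapes are illustrative only.
#include <cstdint>
#include <iostream>
#include <vector>

std::vector<unsigned int> InferGatherShape(const std::vector<unsigned int>& params,
                                           const std::vector<unsigned int>& indices,
                                           int32_t axisParam)
{
    const unsigned int paramsDim  = static_cast<unsigned int>(params.size());
    const unsigned int indicesDim = static_cast<unsigned int>(indices.size());

    // Wrap a negative axis, mirroring the axis_aux computation in the layer.
    unsigned int axis = static_cast<unsigned int>(axisParam);
    if (axisParam < 0)
    {
        axis = static_cast<unsigned int>(static_cast<int32_t>(paramsDim) + axisParam);
    }

    std::vector<unsigned int> out;
    for (unsigned int i = 0; i < axis; ++i)             { out.push_back(params[i]); }  // params dims before the axis
    for (unsigned int i = 0; i < indicesDim; ++i)       { out.push_back(indices[i]); } // every indices dim
    for (unsigned int i = 1 + axis; i < paramsDim; ++i) { out.push_back(params[i]); }  // params dims after the axis
    return out;
}

int main()
{
    // params shape [3, 4, 5], indices shape [2, 6], axis 1 -> output shape [3, 2, 6, 5]
    for (unsigned int d : InferGatherShape({3, 4, 5}, {2, 6}, 1))
    {
        std::cout << d << ' ';
    }
    std::cout << '\n';
    return 0;
}

The printed shape has paramsDim - 1 + indicesDim = 4 dimensions, matching outputDim above.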
// In GatherLayer::ValidateTensorShapesFromInputs(), the inferred shape list is sanity-checked:
if (inferredShapes.size() != 1)
{
    throw armnn::Exception("inferredShapes has "
                           + std::to_string(inferredShapes.size()) +
                           " elements - should only have 1.");
}
armnn::Exception
Base class for all ArmNN exceptions so that users can filter to just those.
GatherLayer * Clone(Graph &graph) const override
Creates a dynamically-allocated copy of this layer.
void ExecuteStrategy(IStrategy &strategy) const override
Apply a visitor to this layer.
std::vector< TensorShape > InferOutputShapes(const std::vector< TensorShape > &inputShapes) const override
Infers the output shapes from given input shapes and layer properties.
void ValidateTensorShapesFromInputs() override
Check if the input tensor shape(s) will lead to a valid configuration of GatherLayer.
GatherLayer(const GatherDescriptor &param, const char *name)
Constructor to create a GatherLayer.
virtual std::unique_ptr< IWorkload > CreateWorkload(const IWorkloadFactory &factory) const override
Makes a workload for the Gather type.
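Based on the helpers referenced in this list (SetAdditionalInfo, PrepInfoAndDesc) and the type-dispatched IWorkloadFactory::CreateWorkload overload below, the Gather workload creation most likely follows the pattern sketched here; this is an inferred sketch, not a verbatim copy of GatherLayer.cpp.

std::unique_ptr<IWorkload> GatherLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
    // Fill a Gather-specific queue descriptor and attach any additional layer information.
    GatherQueueDescriptor descriptor;
    SetAdditionalInfo(descriptor);

    // Hand the descriptor plus the prepared WorkloadInfo to the backend's factory.
    return factory.CreateWorkload(LayerType::Gather, descriptor, PrepInfoAndDesc(descriptor));
}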
virtual void ExecuteStrategy(const IConnectableLayer *layer, const armnn::BaseDescriptor &descriptor, const std::vector< armnn::ConstTensor > &constants, const char *name, const armnn::LayerBindingId id=0)=0
virtual std::unique_ptr< IWorkload > CreateWorkload(LayerType type, const QueueDescriptor &descriptor, const WorkloadInfo &info) const =0
Backends should implement their own CreateWorkload function with a switch statement.
void VerifyLayerConnections(unsigned int expectedConnections, const CheckLocation &location) const
const InputSlot & GetInputSlot(unsigned int index) const override
Get a const input slot handle by slot index.
void VerifyShapeInferenceType(const TensorShape &outputShape, ShapeInferenceMethod shapeInferenceMethod)
const OutputSlot & GetOutputSlot(unsigned int index=0) const override
Get the const output slot handle by slot index.
LayerType * CloneBase(Graph &graph, Params &&... params) const
const char * GetName() const override
Returns the name of the layer.
void ValidateAndCopyShape(const TensorShape &outputShape, const TensorShape &inferredShape, const ShapeInferenceMethod shapeInferenceMethod, const std::string &layerName, const unsigned int outputSlotIndex=0)
void SetAdditionalInfo(QueueDescriptor &descriptor) const
ShapeInferenceMethod m_ShapeInferenceMethod
LayerWithParameters(unsigned int numInputSlots, unsigned int numOutputSlots, LayerType type, const GatherDescriptor &param, const char *name)
WorkloadInfo PrepInfoAndDesc(QueueDescriptor &descriptor) const
const GatherDescriptor & GetParameters() const override
const TensorInfo & GetTensorInfo() const override
const TensorShape & GetShape() const
unsigned int GetNumDimensions() const
Function that returns the tensor rank.
Dimensionality GetDimensionality() const
Function that returns the tensor dimensionality.
void Gather(const TensorInfo &paramsInfo, const TensorInfo &indicesInfo, const TensorInfo &outputInfo, Decoder< I > &params, const int32_t *indices, Encoder< O > &output, const int32_t axis_int)
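The reference implementation above works through Decoder/Encoder abstractions; as a rough, self-contained illustration of what a gather along axis 0 produces, the following sketch uses plain vectors and made-up example data (not taken from the ArmNN sources).

// Self-contained sketch of a gather along axis 0 with made-up example data;
// it uses plain vectors instead of ArmNN's Decoder/Encoder abstractions.
#include <cstddef>
#include <cstdint>
#include <iostream>
#include <vector>

int main()
{
    // params: shape [4, 2], stored row-major.
    std::vector<float> params = { 1, 2,   3, 4,   5, 6,   7, 8 };
    // indices: shape [3], selecting rows of params along axis 0.
    std::vector<int32_t> indices = { 3, 0, 2 };

    const std::size_t innerSize = 2;   // product of the params dimensions after the gather axis
    std::vector<float> output;         // resulting shape is [3, 2]
    for (int32_t idx : indices)
    {
        for (std::size_t j = 0; j < innerSize; ++j)
        {
            output.push_back(params[static_cast<std::size_t>(idx) * innerSize + j]);
        }
    }

    for (float v : output) { std::cout << v << ' '; }   // prints: 7 8 1 2 5 6
    std::cout << '\n';
    return 0;
}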
LayerType
When adding a new layer, adapt also the LastLayer enum value in the enum class LayerType below.
A GatherDescriptor for the GatherLayer.
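For context, a minimal sketch of how a GatherDescriptor is typically used when building a network through the public API; it assumes the INetwork::AddGatherLayer(const GatherDescriptor&, const char*) overload available in recent ArmNN releases, and the binding ids and layer names are illustrative only.

#include <armnn/ArmNN.hpp>

int main()
{
    armnn::INetworkPtr network = armnn::INetwork::Create();

    armnn::GatherDescriptor gatherDesc;
    gatherDesc.m_Axis = 1;   // gather along the second params dimension; negative values wrap

    armnn::IConnectableLayer* params  = network->AddInputLayer(0, "params");
    armnn::IConnectableLayer* indices = network->AddInputLayer(1, "indices");
    armnn::IConnectableLayer* gather  = network->AddGatherLayer(gatherDesc, "gather");
    armnn::IConnectableLayer* output  = network->AddOutputLayer(0, "output");

    // Slot 0 of the Gather layer takes the params tensor, slot 1 the indices tensor.
    params->GetOutputSlot(0).Connect(gather->GetInputSlot(0));
    indices->GetOutputSlot(0).Connect(gather->GetInputSlot(1));
    gather->GetOutputSlot(0).Connect(output->GetInputSlot(0));

    return 0;
}

The descriptor's m_Axis value is the same field the layer reads as m_Param.m_Axis in InferOutputShapes above.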