Arm NN 24.08: TileLayer.cpp source listing
return std::move(layer); // tail of TileLayer::Clone(Graph&): the copy made via CloneBase

std::vector<TensorShape> TileLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
{
    if (inputShapes.size() != 1)
    {
        throw armnn::Exception("inputShapes' size is \"" + std::to_string(inputShapes.size()) +
                               "\" - should be \"1\".");
    }
    const TensorShape& inputShape = inputShapes[0];
    const uint32_t numberOfDimensions = inputShape.GetNumDimensions();

    std::vector<unsigned int> dimensionSizes;
    dimensionSizes.reserve(numberOfDimensions);
    for (uint32_t i = 0; i < numberOfDimensions; ++i)
    {
        dimensionSizes.emplace_back(inputShape[i] * m_Param.m_Multiples[i]); // body filled in from context
    }
    return std::vector<TensorShape>({TensorShape({numberOfDimensions, dimensionSizes.data()})});
}

// In ValidateTensorShapesFromInputs(), the inferred shapes are checked before being copied:
if (inferredShapes.size() != 1)
{
    throw armnn::Exception("inferredShapes has " + std::to_string(inferredShapes.size()) +
                           " elements - should only have 1.");
}
Symbols referenced in the listing:

virtual std::unique_ptr< IWorkload > CreateWorkload(const IWorkloadFactory &factory) const override
Makes a workload for the Tile type.
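A minimal sketch, assuming the usual LayerWithParameters pattern in Arm NN, of how this override forwards to the backend factory (the body is not shown in this listing):

std::unique_ptr<IWorkload> TileLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
    TileQueueDescriptor descriptor;   // queue descriptor specific to the Tile workload
    SetAdditionalInfo(descriptor);    // attach any additional layer information
    return factory.CreateWorkload(LayerType::Tile, descriptor, PrepInfoAndDesc(descriptor));
}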
const TensorInfo & GetTensorInfo() const override
TileLayer * Clone(Graph &graph) const override
Creates a dynamically-allocated copy of this layer.
void ValidateTensorShapesFromInputs() override
Check if the input tensor shape(s) will lead to a valid configuration of TileLayer.
void ValidateAndCopyShape(const TensorShape &outputShape, const TensorShape &inferredShape, const ShapeInferenceMethod shapeInferenceMethod, const std::string &layerName, const unsigned int outputSlotIndex=0)
const OutputSlot & GetOutputSlot(unsigned int index=0) const override
Get the const output slot handle by slot index.
const InputSlot & GetInputSlot(unsigned int index) const override
Get a const input slot handle by slot index.
const char * GetName() const override
Returns the name of the layer.
TileDescriptor m_Param
The parameters for the layer (not including tensor-valued weights etc.).
unsigned int GetNumDimensions() const
Function that returns the tensor rank.
WorkloadInfo PrepInfoAndDesc(QueueDescriptor &descriptor) const
Helper function to reduce duplication in *Layer::CreateWorkload.
std::vector< TensorShape > InferOutputShapes(const std::vector< TensorShape > &inputShapes) const override
Infers the output shapes from given input shapes and layer properties.
void VerifyShapeInferenceType(const TensorShape &outputShape, ShapeInferenceMethod shapeInferenceMethod)
void SetAdditionalInfo(QueueDescriptor &descriptor) const
armnn::Exception
Base class for all ArmNN exceptions so that users can filter to just those.
void Tile(const TileDescriptor &params, const TensorInfo &inputInfo, Decoder< float > &inputDecoder, Encoder< float > &outputEncoder)
TileLayer(const TileDescriptor &param, const char *name)
Constructor to create a TileLayer.
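A sketch of that constructor, assuming the standard LayerWithParameters base (one input slot, one output slot):

TileLayer::TileLayer(const TileDescriptor& param, const char* name)
    : LayerWithParameters(1, 1, LayerType::Tile, param, name)
{}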
std::vector< uint32_t > m_Multiples
The vector to multiply the input shape by.
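For example, a descriptor that repeats a rank-2 tensor twice along each axis; AddTileLayer is assumed here to be the INetwork entry point for this layer:

armnn::TileDescriptor descriptor;
descriptor.m_Multiples = {2, 2}; // tile both dimensions twice

armnn::INetworkPtr network = armnn::INetwork::Create();
armnn::IConnectableLayer* tileLayer = network->AddTileLayer(descriptor, "tile");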
const TensorShape & GetShape() const
void VerifyLayerConnections(unsigned int expectedConnections, const CheckLocation &location) const
ShapeInferenceMethod m_ShapeInferenceMethod
LayerType
When adding a new layer, adapt also the LastLayer enum value in the enum class LayerType.
virtual std::unique_ptr< IWorkload > CreateWorkload(LayerType type, const QueueDescriptor &descriptor, const WorkloadInfo &info) const =0
Backends should implement their own CreateWorkload function with a switch statement.
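A minimal sketch of that switch statement; MyWorkloadFactory and MyTileWorkload are hypothetical names, not part of Arm NN:

std::unique_ptr<IWorkload> MyWorkloadFactory::CreateWorkload(LayerType type,
                                                             const QueueDescriptor& descriptor,
                                                             const WorkloadInfo& info) const
{
    switch (type)
    {
        case LayerType::Tile:
        {
            // Downcast to the Tile-specific descriptor before building the workload.
            auto tileDescriptor = PolymorphicDowncast<const TileQueueDescriptor*>(&descriptor);
            return std::make_unique<MyTileWorkload>(*tileDescriptor, info);
        }
        default:
            return nullptr; // layer type not handled by this backend
    }
}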