24.02
|
Go to the documentation of this file.
28 auto layer = CloneBase<BatchMatMulLayer>(graph,
m_Param,
GetName());
30 return std::move(layer);
55 inputXShape : inputYShape;
57 inputYShape : inputXShape;
63 std::vector<unsigned int> tensorDimensions(outputNumDimensions, 0);
70 for (
unsigned int i = 0; i < outputNumDimensions; ++i)
72 if (i == longerAxesToMul.first)
74 tensorDimensions[i] = &shorterInput == &inputXShape ? inputXShape[i - inputNumDimsOffset] : inputXShape[i];
76 else if(i == longerAxesToMul.second)
78 tensorDimensions[i] = &shorterInput == &inputYShape ? inputYShape[i - inputNumDimsOffset] : inputYShape[i];
83                 tensorDimensions[i] =
                       static_cast<int>(i) - static_cast<int>(inputNumDimsOffset) < 0 ?
                           longerInput[i] :
85                         std::max(longerInput[i], shorterInput[i - inputNumDimsOffset]);
89 auto outputShape =
TensorShape(outputNumDimensions, tensorDimensions.data());
90 return std::vector<TensorShape>({ outputShape });
#define ARMNN_ASSERT(COND)
bool m_TransposeX
Transpose the slices of each input tensor. Transpose and Adjoint can not both be set to true for the same input at the same time.
const TensorInfo & GetTensorInfo() const override
bool m_AdjointX
Adjoint the slices of each input tensor. Transpose and Adjoint can not both be set to true for the same input at the same time.
static std::pair< unsigned int, unsigned int > GetAxesToMul(DataLayout dataLayout, const TensorShape &tensorShape)
Static helper to get the two axes (for each input) for multiplication.
void ValidateTensorShapesFromInputs() override
Check if the input tensor shapes will lead to a valid configuration of BatchMatMulLayer.
DataLayout m_DataLayoutX
Data layout of each input tensor, such as NHWC/NDHWC (leave as default for arbitrary layout)
void ValidateAndCopyShape(const TensorShape &outputShape, const TensorShape &inferredShape, const ShapeInferenceMethod shapeInferenceMethod, const std::string &layerName, const unsigned int outputSlotIndex=0)
static PermutationVector GetPermuteVec(DataLayout dataLayout, const TensorShape &tensorShape)
Static helper to get the axes which will be transposed.
const OutputSlot & GetOutputSlot(unsigned int index=0) const override
Get the const output slot handle by slot index.
BatchMatMulLayer * Clone(Graph &graph) const override
Creates a dynamically-allocated copy of this layer.
armnn::TensorShape Permuted(const armnn::TensorShape &srcShape, const armnn::PermutationVector &mappings)
const InputSlot & GetInputSlot(unsigned int index) const override
Get a const input slot handle by slot index.
const char * GetName() const override
Returns the name of the layer.
BatchMatMulDescriptor m_Param
The parameters for the layer (not including tensor-valued weights etc.).
unsigned int GetNumDimensions() const
Function that returns the tensor rank.
WorkloadInfo PrepInfoAndDesc(QueueDescriptor &descriptor) const
Helper function to reduce duplication in *Layer::CreateWorkload.
std::vector< TensorShape > InferOutputShapes(const std::vector< TensorShape > &inputShapes) const override
Infers the output shape from the given input shapes.
virtual std::unique_ptr< IWorkload > CreateWorkload(const IWorkloadFactory &factory) const override
Makes a workload for the BatchMatMul type.
void VerifyShapeInferenceType(const TensorShape &outputShape, ShapeInferenceMethod shapeInferenceMethod)
A BatchMatMulDescriptor for the BatchMatMul operator.
void SetAdditionalInfo(QueueDescriptor &descriptor) const
const TensorShape & GetShape() const
Copyright (c) 2021 ARM Limited and Contributors.
BatchMatMulLayer(const BatchMatMulDescriptor &param, const char *name)
Constructor to create a BatchMatMulLayer.
void VerifyLayerConnections(unsigned int expectedConnections, const CheckLocation &location) const
ShapeInferenceMethod m_ShapeInferenceMethod
LayerType
When adding a new layer, adapt also the LastLayer enum value in the enum class LayerType below.
virtual std::unique_ptr< IWorkload > CreateWorkload(LayerType type, const QueueDescriptor &descriptor, const WorkloadInfo &info) const =0
Backends should implement their own CreateWorkload function with a switch statement.