ArmNN
 25.11
Loading...
Searching...
No Matches
FullyConnectedLayer.cpp
Go to the documentation of this file.
1//
2// Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
3// SPDX-License-Identifier: MIT
4//
6
7#include "LayerCloneBase.hpp"
8
13
14namespace armnn
15{
16
21
22std::unique_ptr<IWorkload> FullyConnectedLayer::CreateWorkload(const IWorkloadFactory& factory) const
23{
25 SetAdditionalInfo(descriptor);
26 return factory.CreateWorkload(LayerType::FullyConnected, descriptor, PrepInfoAndDesc(descriptor));
27}
28
30{
31 auto layer = CloneBase<FullyConnectedLayer>(graph, m_Param, GetName());
32 return std::move(layer);
33}
34
35std::vector<TensorShape> FullyConnectedLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
36{
37 if (inputShapes.size() != 2)
38 {
39 throw armnn::Exception("inputShapes' size is \"" + std::to_string(inputShapes.size()) +
40 "\" - should be \"2\".");
41 }
42
43 const TensorShape& inputShape = inputShapes[0];
44 const TensorShape weightShape = inputShapes[1];
45
46 // Output for FC is [1, w[1]].
47 unsigned int batches = inputShape[0];
48 unsigned int dimIdx = m_Param.m_TransposeWeightMatrix ? 0 : 1;
49
50 return std::vector<TensorShape>({ TensorShape({batches, weightShape[dimIdx]})});
51}
52
54{
55 const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
56
58
59 std::vector<TensorShape> inferredShapes = InferOutputShapes(
62
63 if (inferredShapes.size() != 1)
64 {
65 throw armnn::LayerValidationException("inferredShapes has "
66 + std::to_string(inferredShapes.size()) +
67 " elements - should only have 1.");
68 }
69
70 if (inferredShapes[0].GetDimensionality() != Dimensionality::Specified)
71 {
72 throw armnn::LayerValidationException("inferredShapes' dimensionality has not been specified.");
73 }
74
75 ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "FullyConnectedLayer");
76}
77
83
85{
86 strategy.ExecuteStrategy(this, GetParameters(), {}, GetName());
87}
88
89} // namespace armnn
Base class for all ArmNN exceptions so that users can filter to just those.
ImmutableConstantTensors GetConstantTensorsByRef() const override
Retrieve the handles to the constant values stored by the layer.
void ExecuteStrategy(IStrategy &strategy) const override
Apply a visitor to this layer.
std::vector< TensorShape > InferOutputShapes(const std::vector< TensorShape > &inputShapes) const override
By default returns inputShapes if the number of inputs is equal to the number of outputs,...
FullyConnectedLayer(const FullyConnectedDescriptor &param, const char *name)
Constructor to create a FullyConnectedLayer.
void ValidateTensorShapesFromInputs() override
Check if the input tensor shape(s) will lead to a valid configuration of FullyConnectedLayer.
virtual std::unique_ptr< IWorkload > CreateWorkload(const IWorkloadFactory &factory) const override
Makes a workload for the FullyConnected type.
FullyConnectedLayer * Clone(Graph &graph) const override
Creates a dynamically-allocated copy of this layer.
std::vector< std::reference_wrapper< const std::shared_ptr< ConstTensorHandle > > > ImmutableConstantTensors
Definition INetwork.hpp:141
virtual void ExecuteStrategy(const IConnectableLayer *layer, const armnn::BaseDescriptor &descriptor, const std::vector< armnn::ConstTensor > &constants, const char *name, const armnn::LayerBindingId id=0)=0
virtual std::unique_ptr< IWorkload > CreateWorkload(LayerType type, const QueueDescriptor &descriptor, const WorkloadInfo &info) const =0
Backends should implement their own CreateWorkload function with a switch statement.
const TensorInfo & GetTensorInfo() const override
Gets the TensorInfo for this InputSlot.
Definition Layer.cpp:614
const InputSlot & GetInputSlot(unsigned int index) const override
Get a const input slot handle by slot index.
Definition Layer.hpp:337
void VerifyShapeInferenceType(const TensorShape &outputShape, ShapeInferenceMethod shapeInferenceMethod)
Definition Layer.cpp:526
const OutputSlot & GetOutputSlot(unsigned int index=0) const override
Get the const output slot handle by slot index.
Definition Layer.hpp:339
LayerType * CloneBase(Graph &graph, Params &&... params) const
const char * GetName() const override
Returns the name of the layer.
Definition Layer.hpp:332
void ValidateAndCopyShape(const TensorShape &outputShape, const TensorShape &inferredShape, const ShapeInferenceMethod shapeInferenceMethod, const std::string &layerName, const unsigned int outputSlotIndex=0)
Definition Layer.cpp:457
void SetAdditionalInfo(QueueDescriptor &descriptor) const
Definition Layer.cpp:303
ShapeInferenceMethod m_ShapeInferenceMethod
Definition Layer.hpp:441
LayerWithParameters(unsigned int numInputSlots, unsigned int numOutputSlots, LayerType type, const FullyConnectedDescriptor &param, const char *name)
WorkloadInfo PrepInfoAndDesc(QueueDescriptor &descriptor) const
const FullyConnectedDescriptor & GetParameters() const override
Layer::ImmutableConstantTensors GetConnectedConstantAsInputTensors() const
const TensorInfo & GetTensorInfo() const override
Definition Layer.cpp:100
const TensorShape & GetShape() const
Definition Tensor.hpp:193
Copyright (c) 2021 ARM Limited and Contributors.
uint32_t GetNumInputs(bool biasEnabled)
void FullyConnected(const TensorShape &rInputShape, Decoder< float > &rInputDecoder, const TensorShape &rOutputShape, Encoder< float > &rOutputEncoder, const TensorShape &rWeightsShape, Decoder< float > &rWeightDecoder, Decoder< float > *pBiasDecoder, const bool biasEnabled, const unsigned int K, const bool transposeWeights)
Performs a matrix multiplication and optionally adds a bias.
LayerType
When adding a new layer, adapt also the LastLayer enum value in the enum class LayerType below.
Definition Types.hpp:494
A FullyConnectedDescriptor for the FullyConnectedLayer.