Compute Library 22.11
FullyConnectedLayer.cpp
/*
 * Copyright (c) 2018-2021 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/graph/nodes/FullyConnectedLayerNode.h"

#include "arm_compute/core/Utils.h"
#include "arm_compute/graph/Graph.h"
#include "arm_compute/graph/INodeVisitor.h"

namespace arm_compute
{
namespace graph
{
FullyConnectedLayerNode::FullyConnectedLayerNode(unsigned int num_outputs, QuantizationInfo out_quant_info, FullyConnectedLayerInfo fc_info, FastMathHint fast_math_hint)
    : _num_outputs(num_outputs), _out_quant_info(std::move(out_quant_info)), _info(fc_info), _fast_math_hint(fast_math_hint)
{
    _input_edges.resize(3, EmptyEdgeID);
    _outputs.resize(1, NullTensorID);
}
void FullyConnectedLayerNode::set_fast_math_hint(FastMathHint hint)
{
    _fast_math_hint = hint;
}

FastMathHint FullyConnectedLayerNode::fast_math_hint() const
{
    return _fast_math_hint;
}

void FullyConnectedLayerNode::set_fused_activation(ActivationLayerInfo fused_activation)
{
    _info.activation_info = fused_activation;
}

TensorDescriptor FullyConnectedLayerNode::compute_weights_descriptor(const TensorDescriptor &input_descriptor,
                                                                     unsigned int            num_outputs,
                                                                     FullyConnectedLayerInfo fc_info,
                                                                     const QuantizationInfo &weights_quant_info)
{
    unsigned int num_weights    = 1;
    unsigned int num_dimensions = input_descriptor.shape.num_dimensions();
    // Ignore the batch dimension if there is one:
    if(num_dimensions == 2 || num_dimensions == 4)
    {
        num_dimensions--;
    }
    for(unsigned int i = 0; i < num_dimensions; i++)
    {
        num_weights *= input_descriptor.shape[i];
    }

    TensorDescriptor weights_descriptor = input_descriptor;
    weights_descriptor.shape            = TensorShape(num_weights, num_outputs);

    // If weights are transposed, use transposed shape
    if(!fc_info.transpose_weights)
    {
        weights_descriptor.shape = TensorShape(num_outputs, num_weights);
    }

    // Set quantization info if present
    if(!weights_quant_info.empty())
    {
        weights_descriptor.quant_info = weights_quant_info;
    }

    return weights_descriptor;
}

TensorDescriptor FullyConnectedLayerNode::compute_output_descriptor(const TensorDescriptor &input_descriptor,
                                                                    unsigned int            num_outputs,
                                                                    const QuantizationInfo &out_quant_info)
{
    // Note: Only 1D batch space is supported at the moment
    unsigned int batches = input_descriptor.shape[1];
    if(input_descriptor.shape.num_dimensions() > 2)
    {
        batches = input_descriptor.shape[3];
    }

    // Set descriptor shape
    TensorDescriptor output_descriptor = input_descriptor;
    output_descriptor.shape            = TensorShape(num_outputs, batches);

    // Set quantization info if present
    if(!out_quant_info.empty())
    {
        output_descriptor.quant_info = out_quant_info;
    }

    return output_descriptor;
}

FullyConnectedLayerInfo FullyConnectedLayerNode::info() const
{
    return _info;
}

bool FullyConnectedLayerNode::forward_descriptors()
{
    if((input_id(0) != NullTensorID) && (output_id(0) != NullTensorID))
    {
        Tensor *dst = output(0);
        ARM_COMPUTE_ERROR_ON(dst == nullptr);
        dst->desc() = configure_output(0);
        return true;
    }
    return false;
}

TensorDescriptor FullyConnectedLayerNode::configure_output(size_t idx) const
{
    ARM_COMPUTE_UNUSED(idx);
    const Tensor *src = input(0);
    ARM_COMPUTE_ERROR_ON(src == nullptr);

    return compute_output_descriptor(src->desc(), _num_outputs, _out_quant_info);
}

NodeType FullyConnectedLayerNode::type() const
{
    return NodeType::FullyConnectedLayer;
}

void FullyConnectedLayerNode::accept(INodeVisitor &v)
{
    v.visit(*this);
}
} // namespace graph
} // namespace arm_compute
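For reference, the following is a minimal standalone sketch (not part of the library) of how the two static helpers above derive shapes for a 4D input laid out as [width, height, channels, batches], the ordering the code assumes when it reads the batch size from dimension 3. The include path and the explicit transpose_weights setting are assumptions for illustration; the expected shapes in the comments follow directly from the arithmetic in the functions above.

// Usage sketch, assuming the program is built and linked against the graph library.
#include <iostream>

#include "arm_compute/graph/nodes/FullyConnectedLayerNode.h"

int main()
{
    using namespace arm_compute;
    using namespace arm_compute::graph;

    // 7 x 7 x 64 input with a batch size of 2 (the batch sits in dimension 3).
    TensorDescriptor input_desc;
    input_desc.shape = TensorShape(7U, 7U, 64U, 2U);

    const unsigned int num_outputs = 10;

    // All non-batch dimensions are flattened: 7 * 7 * 64 = 3136 weights per output.
    FullyConnectedLayerInfo fc_info;
    fc_info.transpose_weights = true; // weights laid out as (num_weights, num_outputs)

    const TensorDescriptor weights_desc =
        FullyConnectedLayerNode::compute_weights_descriptor(input_desc, num_outputs, fc_info);
    const TensorDescriptor output_desc =
        FullyConnectedLayerNode::compute_output_descriptor(input_desc, num_outputs);

    // Expected shapes: weights (3136, 10), output (10, 2).
    std::cout << "weights: " << weights_desc.shape[0] << " x " << weights_desc.shape[1] << "\n"
              << "output:  " << output_desc.shape[0] << " x " << output_desc.shape[1] << "\n";
    return 0;
}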