Compute Library 23.08 - FullyConnectedLayer.cpp
/*
 * Copyright (c) 2017-2021 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "FullyConnectedLayer.h"

#include "arm_compute/core/Types.h"
#include "arm_compute/core/utils/quantization/AsymmHelpers.h"

#include "tests/validation/reference/UtilsQuantizedAsymm.h"

#include <numeric>

namespace arm_compute
{
namespace test
{
namespace validation
{
namespace reference
{
namespace
{
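// The two overloads below implement the same reference computation,
//
//     dst[y] = sum_x src[x] * weights[y * cols_weights + x] + bias[y],
//
// i.e. each output element is the dot product of the input vector with one
// row of the weight matrix, plus the corresponding bias term. The first
// overload operates directly on floating-point values; the second operates
// on asymmetrically quantized integers and requantizes the accumulator.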
// Vector matrix multiply for floating point
template < typename T, typename TB, typename std::enable_if < is_floating_point<T>::value && is_floating_point<TB>::value, int >::type = 0 >
void vector_matrix_multiply(const SimpleTensor<T> &src, const SimpleTensor<T> &weights, const SimpleTensor<TB> &bias, SimpleTensor<T> &dst, int offset_src, int offset_dst, int cols_weights,
                            int rows_weights)
{
    const T  *src_ptr     = src.data() + offset_src;
    const T  *weights_ptr = weights.data();
    const TB *bias_ptr    = bias.data();
    T        *dst_ptr     = dst.data() + offset_dst;
#if defined(_OPENMP)
    #pragma omp parallel for
#endif /* _OPENMP */
    for(int y = 0; y < rows_weights; ++y)
    {
        dst_ptr[y] = std::inner_product(src_ptr, src_ptr + cols_weights, &weights_ptr[cols_weights * y], static_cast<T>(0)) + bias_ptr[y];
    }
}

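// Note on the quantized path: with an asymmetric scheme,
// real_value = scale * (quantized_value - offset), so input_offset and
// weights_offset below hold the negated zero points and the integer
// accumulation works on (value - offset) terms. The real rescaling factor
// input_scale * weights_scale / output_scale is converted to a fixed-point
// multiplier plus shift by calculate_quantized_multiplier(); it is applied by
// quantize_down_scale_by_fixedpoint(), which also adds the output offset back
// and clamps the result to the representable range of T.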
// Vector matrix multiply for quantized type
template < typename T, typename TB, typename std::enable_if < (std::is_same<T, uint8_t>::value || std::is_same<T, int8_t>::value) && std::is_same<TB, int32_t>::value, int >::type = 0 >
void vector_matrix_multiply(const SimpleTensor<T> &src, const SimpleTensor<T> &weights, const SimpleTensor<TB> &bias, SimpleTensor<T> &dst, int offset_src, int offset_dst,
                            int cols_weights, int rows_weights)
{
    const T  *src_ptr     = src.data() + offset_src;
    const T  *weights_ptr = weights.data();
    const TB *bias_ptr    = bias.data();
    T        *dst_ptr     = dst.data() + offset_dst;

    const UniformQuantizationInfo iq_info = src.quantization_info().uniform();
    const UniformQuantizationInfo wq_info = weights.quantization_info().uniform();
    const UniformQuantizationInfo oq_info = dst.quantization_info().uniform();

    const int   input_offset   = -iq_info.offset;
    const float input_scale    = iq_info.scale;
    const int   weights_offset = -wq_info.offset;
    const float weights_scale  = wq_info.scale;
    const int   output_offset  = oq_info.offset;
    const float output_scale   = oq_info.scale;

    int         output_multiplier = 0;
    int         output_shift      = 0;
    const float multiplier        = input_scale * weights_scale / output_scale;
    arm_compute::quantization::calculate_quantized_multiplier(multiplier, &output_multiplier, &output_shift);

    const int min = std::numeric_limits<T>::lowest();
    const int max = std::numeric_limits<T>::max();
#if defined(_OPENMP)
    #pragma omp parallel for
#endif /* _OPENMP */
    for(int y = 0; y < rows_weights; ++y)
    {
        // Reset accumulator
        int32_t acc = 0;

        for(int x = 0; x < cols_weights; ++x)
        {
            acc += (src_ptr[x] + input_offset) * (weights_ptr[x + y * cols_weights] + weights_offset);
        }

        // Accumulate the bias
        acc += bias_ptr[y];

        // Quantize down
        acc = quantize_down_scale_by_fixedpoint(acc, output_multiplier, output_shift, output_offset, min, max);

        // Store the result
        dst_ptr[y] = static_cast<T>(acc);
    }
}
} // namespace

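// Reference fully connected (dense) layer. src is interpreted as num_batches
// flattened input vectors of linear_input_size elements, weights as a
// (cols_weights x rows_weights) matrix with one output neuron per row, and
// each batch is processed with one vector-matrix multiply.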
template <typename T, typename TB>
SimpleTensor<T> fully_connected_layer(const SimpleTensor<T> &src, const SimpleTensor<T> &weights, const SimpleTensor<TB> &bias, const TensorShape &dst_shape, QuantizationInfo out_quant_info)
{
    // If no explicit output quantization has been set, use the same quantization info as src
    if(out_quant_info == QuantizationInfo())
    {
        out_quant_info = src.quantization_info();
    }

    // Create reference
    SimpleTensor<T> dst{ TensorShape{ dst_shape }, src.data_type(), 1, out_quant_info };

    // Health checks
    const int          num_batch_dimensions = std::max(0, static_cast<int>(dst_shape.num_dimensions()) - 1);
    const int          num_input_dimensions = src.shape().num_dimensions() - num_batch_dimensions;
    const unsigned int linear_input_size    = src.shape().total_size_lower(num_input_dimensions);

    ARM_COMPUTE_UNUSED(num_batch_dimensions);
    ARM_COMPUTE_UNUSED(num_input_dimensions);
    ARM_COMPUTE_UNUSED(linear_input_size);
    ARM_COMPUTE_ERROR_ON(weights.shape().x() != linear_input_size);
    ARM_COMPUTE_ERROR_ON(weights.shape().y() != bias.shape().x());
    ARM_COMPUTE_ERROR_ON(weights.shape().y() != dst.shape().x());

    // Compute reference
    const int cols_weights = weights.shape().x();
    const int rows_weights = weights.shape().y();
    const int num_batches  = dst_shape.total_size_upper(1);

    for(int k = 0; k < num_batches; ++k)
    {
        const int offset_in  = k * cols_weights;
        const int offset_out = k * rows_weights;

        vector_matrix_multiply<T>(src,
                                  weights,
                                  bias,
                                  dst,
                                  offset_in,
                                  offset_out,
                                  cols_weights,
                                  rows_weights);
    }

    return dst;
}
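
// Explicit instantiations for the data types exercised by the validation
// suite: FP32, FP16, and the two asymmetrically quantized 8-bit types
// (with int32_t bias).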
template SimpleTensor<float> fully_connected_layer(const SimpleTensor<float> &src, const SimpleTensor<float> &weights, const SimpleTensor<float> &bias, const TensorShape &dst_shape,
                                                   QuantizationInfo out_quant_info);
template SimpleTensor<half> fully_connected_layer(const SimpleTensor<half> &src, const SimpleTensor<half> &weights, const SimpleTensor<half> &bias, const TensorShape &dst_shape,
                                                  QuantizationInfo out_quant_info);
template SimpleTensor<uint8_t> fully_connected_layer(const SimpleTensor<uint8_t> &src, const SimpleTensor<uint8_t> &weights, const SimpleTensor<int32_t> &bias, const TensorShape &dst_shape,
                                                     QuantizationInfo out_quant_info);
template SimpleTensor<int8_t> fully_connected_layer(const SimpleTensor<int8_t> &src, const SimpleTensor<int8_t> &weights, const SimpleTensor<int32_t> &bias, const TensorShape &dst_shape,
                                                    QuantizationInfo out_quant_info);
} // namespace reference
} // namespace validation
} // namespace test
} // namespace arm_compute
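
// Example usage (hypothetical sketch, not part of the original file): a test
// could compute a reference result for 2 batches of 128 inputs mapped to 64
// outputs as below; the shape values are illustrative only. Note that
// weights use shape (cols, rows) = (input size, output size).
//
//   SimpleTensor<float> src{ TensorShape(128U, 2U), DataType::F32 };      // 2 batches of 128 inputs
//   SimpleTensor<float> weights{ TensorShape(128U, 64U), DataType::F32 }; // 64 rows of 128 weights
//   SimpleTensor<float> bias{ TensorShape(64U), DataType::F32 };
//   // ... fill src, weights and bias ...
//   SimpleTensor<float> dst = reference::fully_connected_layer(
//       src, weights, bias, TensorShape(64U, 2U), QuantizationInfo());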