Compute Library
 20.08
neon_gemm_qasymm8.cpp
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2020 Arm Limited.
3  *
4  * SPDX-License-Identifier: MIT
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a copy
7  * of this software and associated documentation files (the "Software"), to
8  * deal in the Software without restriction, including without limitation the
9  * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10  * sell copies of the Software, and to permit persons to whom the Software is
11  * furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in all
14  * copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22  * SOFTWARE.
23  */
24 #include "arm_compute/core/Types.h"
29 #include "utils/Utils.h"
31 
32 #include <cstdlib>
33 
34 using namespace arm_compute;
35 using namespace utils;
36 
// Find the minimum and maximum value in a float array.
//
// @param[in]  size Number of elements in @p data; a size <= 0 leaves *min/*max untouched
// @param[in]  data Pointer to the float values to scan
// @param[out] min  Receives the smallest value found
// @param[out] max  Receives the largest value found
void find_min_max(int size, const float *data, float *min, float *max)
{
    // Guard the empty case: the unconditional data[0] read below would be out of bounds.
    if(size <= 0)
    {
        return;
    }
    *min = *max = data[0];
    // Start at 1: element 0 already seeded the running min/max.
    for(int i = 1; i < size; i++)
    {
        const float val = data[i];
        *min = std::min(*min, val);
        *max = std::max(*max, val);
    }
}
48 
49 // Return reasonable quantisation parameters to use for an array of floats
50 // based on min and max values
52 {
53  // Extend the [min,max] interval to contain 0 so we can represent it exactly
54  min = std::min(min, 0.f);
55  max = std::max(max, 0.f);
56 
57  // Set the quantized min and max in float values
58  const float qmin = 0;
59  const float qmax = 255;
60 
61  // Determine the scale
62  const float scale = (max - min) / (qmax - qmin);
63 
64  // Determine the zero-point; using affine equation val = (qval-zerop) * scale
65  const float zero_point_real = qmin - min / scale;
66 
67  // But we need to nudge the zero_point to an integer (exact quantized value)
68  std::uint8_t zero_point_nudged = 0;
69  if(zero_point_real < qmin)
70  {
71  zero_point_nudged = qmin;
72  }
73  else if(zero_point_real > qmax)
74  {
75  zero_point_nudged = qmax;
76  }
77  else
78  {
79  zero_point_nudged = static_cast<std::uint8_t>(support::cpp11::round(zero_point_real));
80  }
81 
82  QuantizationInfo qinfo = QuantizationInfo(scale, zero_point_nudged);
83  return qinfo;
84 }
85 
86 void quantize_values(int size, qasymm8_t *output, float *input, const QuantizationInfo qinfo)
87 {
88  for(int i = 0; i < size; i++)
89  {
90  output[i] = quantize_qasymm8(input[i], qinfo);
91  }
92  std::cout << "\n";
93 }
94 
95 int main(int argc, char **argv)
96 {
    // Demonstrates an F32 GEMM followed by the equivalent quantised (QASYMM8)
    // low-precision GEMM pipeline: quantize inputs -> lowp matmul (S32) ->
    // fixed-point output stage back to QASYMM8.
97  Tensor src1;
98  Tensor src2;
99  Tensor dst0;
100  Tensor q_src1;
101  Tensor q_src2;
102  Tensor q_dst0;
103  Tensor q_res;
104  Tensor q_res_output;
105  size_t M = 4;
106  size_t N = 4;
107  size_t K = 4;
108  bool default_input = true;
109 
110  // Parse args
    // NOTE(review): this check should be `argc < 4` — when exactly two sizes
    // are supplied (argc == 3) the else branch below reads argv[3], which is
    // argv[argc] (the terminating null pointer), and strtol(nullptr, ...) is UB.
111  if(argc < 3) /* case default matrix sizes */
112  {
113  // Print help
114  std::cout << "Usage: ./build/neon_gemm_qasymm8 M N K\n";
115  std::cout << "Too few or no inputs provided. Using default M=4, N=4, K=4\n\n";
116  }
117  else /* case M N K arguments provided */
118  {
    // NOTE(review): strtol results are not validated; non-numeric input yields 0.
119  M = strtol(argv[1], nullptr, 10);
120  N = strtol(argv[2], nullptr, 10);
121  K = strtol(argv[3], nullptr, 10);
122  default_input = false;
123  }
124 
125  /*** Floating point matrix multiplication ***/
126 
127  // Initialise input matrices
128  NEGEMM fgemm{};
129 
    // TensorShape takes (width, height): src1 is MxK, src2 is KxN, dst0 is MxN.
130  src1.allocator()->init(TensorInfo(TensorShape(K, M), 1, DataType::F32));
131  src2.allocator()->init(TensorInfo(TensorShape(N, K), 1, DataType::F32));
132  dst0.allocator()->init(TensorInfo(TensorShape(N, M), 1, DataType::F32));
    // alpha = 1, beta = 0: plain dst0 = src1 * src2, no accumulation input (c = nullptr).
133  fgemm.configure(&src1, &src2, nullptr, &dst0, 1, 0);
134 
135  // Allocate matrices
136  src1.allocator()->allocate();
137  src2.allocator()->allocate();
138  dst0.allocator()->allocate();
139 
140  // Fill in tensors, by default fill in with known data - for easy testing
141  auto *src1_ptr = reinterpret_cast<float *>(src1.buffer());
142  auto *src2_ptr = reinterpret_cast<float *>(src2.buffer());
143  auto *dst0_ptr = reinterpret_cast<float *>(dst0.buffer());
144 
145  // Fill in: one is the identity matrix, other is sequential values
146  // src1: Identity matrix
147  for(size_t i = 0; i < M * K; i++) {
148  src1_ptr[i] = 0;
149  }
    // NOTE(review): the diagonal loop should run to std::min(M, K) — with
    // M > K, src1_ptr[i * K + i] writes past the M*K buffer.
150  for(size_t i = 0; i < M; i++) {
151  src1_ptr[i * K + i] = 1.0f;
152  }
153 
154  // src2: Sequential values matrix
155  for(size_t i = 0; i < K * N; i++) {
156  src2_ptr[i] = i * 1.123f;
157  }
158 
159  // Otherwise if M, N, K is given, fill in with random values
160  if(!default_input)
161  {
162  fill_random_tensor(src1, 0.f, 1.f);
163  fill_random_tensor(src2, 0.f, 1.f);
164  }
165 
166  // Run single precision gemm and print result
167  fgemm.run();
168 
169 #if ARM_COMPUTE_DEBUG_ENABLED
170  std::cout << "Result matrix:\n";
171  src1.print(std::cout);
172  src2.print(std::cout);
173  dst0.print(std::cout);
174 #endif // ARM_COMPUTE_DEBUG_ENABLED
175 
176  /*** Quantised asymmetric 8bit matrix multiplication ***/
177 
178  // Start by finding the quantisation parameters for each set of values
179  float src1_min;
180  float src1_max;
181  float src2_min;
182  float src2_max;
183  float dst0_min;
184  float dst0_max;
185 
    // NOTE(review): the size_t products narrow to int here (find_min_max takes
    // int); fine for small demo sizes but would overflow for very large M/N/K.
186  find_min_max(M * K, src1_ptr, &src1_min, &src1_max);
187  find_min_max(K * N, src2_ptr, &src2_min, &src2_max);
188  find_min_max(M * N, dst0_ptr, &dst0_min, &dst0_max);
189 
190  const QuantizationInfo src1_qinfo = choose_quantization_params(src1_min, src1_max);
191  const QuantizationInfo src2_qinfo = choose_quantization_params(src2_min, src2_max);
192  const QuantizationInfo dst0_qinfo = choose_quantization_params(dst0_min, dst0_max);
193 
194  std::cout << "Matrix 1: min=" << src1_min << ", max=" << src1_max << ", ";
195  std::cout << "QuantisationInfo(" << src1_qinfo.scale()[0] << ", " << src1_qinfo.offset()[0] << ")\n";
196  std::cout << "Matrix 2: min=" << src2_min << ", max=" << src2_max << ", ";
197  std::cout << "QuantisationInfo(" << src2_qinfo.scale()[0] << ", " << src2_qinfo.offset()[0] << ")\n";
198  std::cout << "Result : min=" << dst0_min << ", max=" << dst0_max << ", ";
199  std::cout << "QuantisationInfo(" << dst0_qinfo.scale()[0] << ", " << dst0_qinfo.offset()[0] << ")\n";
200 
201  // We now have the quantisation info and can configure the quantised tensors
202  q_src1.allocator()->init(TensorInfo(TensorShape(K, M), 1, DataType::QASYMM8, src1_qinfo));
203  q_src2.allocator()->init(TensorInfo(TensorShape(N, K), 1, DataType::QASYMM8, src2_qinfo));
204  q_dst0.allocator()->init(TensorInfo(TensorShape(N, M), 1, DataType::QASYMM8, dst0_qinfo));
205 
206  // In this approach we use the QuantizationLayer construct to perform quantization
    // NOTE(review): the declarations of q1/q2/q3 (quantization-layer functions)
    // are elided in this doxygen listing (original source lines 207-209).
210  q1.configure(&src1, &q_src1);
211  q2.configure(&src2, &q_src2);
212  q3.configure(&dst0, &q_dst0);
213 
214  // Configure low precision gemm and initialise result tensor (pre-output)
    // NOTE(review): the declaration of qgemm (the lowp GEMM core function) is
    // elided in this listing (original source line 215). The S32 result holds
    // raw accumulators before the output stage rescales them to QASYMM8.
216  q_res.allocator()->init(TensorInfo(TensorShape(N, M), 1, DataType::S32));
217  qgemm.configure(&q_src1, &q_src2, nullptr, &q_res);
218 
219  // Configure output stage after computing shift and multiplier parameters
    // NOTE(review): the declaration of gemmlowp_output_stage is elided in this
    // listing (original source line 220).
221  int output_multiplier;
222  int output_shift;
    // Real-valued rescale factor: (scale_a * scale_b) / scale_out, expressed
    // below as a fixed-point multiplier plus right shift.
223  float multiplier = (src1_qinfo.uniform().scale * src2_qinfo.uniform().scale) / dst0_qinfo.uniform().scale;
224  quantization::calculate_quantized_multiplier_less_than_one(multiplier, &output_multiplier, &output_shift);
225  std::cout << "(q_multiplier, q_shift) = (" << output_multiplier << ", " << output_shift << ")\n\n";
226  gemmlowp_output_stage.configure(&q_res, nullptr, &q_res_output, output_multiplier, output_shift, dst0_qinfo.uniform().offset);
227 
228  // Allocate all tensors
229  q_src1.allocator()->allocate();
230  q_src2.allocator()->allocate();
231  q_dst0.allocator()->allocate();
232  q_res.allocator()->allocate();
233  q_res_output.allocator()->allocate();
234 
235  // Run quantization layers (quantizes values of each tensor)
236  q1.run();
237  q2.run();
238  q3.run();
239  // Run low precision matrix multiply kernel
240  qgemm.run();
241  // Run output stage kernel
242  gemmlowp_output_stage.run();
243  std::cout << "Done\n";
244 
245 #if ARM_COMPUTE_DEBUG_ENABLED
246  // Print quantized source matrices
247  q_src1.print(std::cout);
248  q_src2.print(std::cout);
249  // Print result matrix in int32 form - before output stage processing
250  std::cout << "Lowp GEMM output (int32):\n";
251  q_res.print(std::cout);
252  // Print QASYMM8 (quantized) matrix
253  std::cout << "Output pipeline result matrix:\n";
254  q_res_output.print(std::cout);
255 
256  // Expected result
    // q_dst0 is the directly-quantized F32 result, used as the reference to
    // compare against the lowp pipeline's q_res_output.
257  std::cout << "Expected result:\n";
258  q_dst0.print(std::cout);
259 #endif // ARM_COMPUTE_DEBUG_ENABLED
260 }
const std::vector< int32_t > & offset() const
Offset vector accessor.
Shape of a tensor.
Definition: TensorShape.h:39
void run() override final
Run the kernels contained in the function.
Basic function to execute GEMM on NEON.
Definition: NEGEMM.h:59
void init(const TensorAllocator &allocator, const Coordinates &coords, TensorInfo &sub_info)
Shares the same backing memory with another tensor allocator, while the tensor info might be different.
uint8_t quantize_qasymm8(float value, const INFO_TYPE &qinfo, RoundingPolicy rounding_policy=RoundingPolicy::TO_NEAREST_UP)
Quantize a value given an unsigned 8-bit asymmetric quantization scheme.
Basic function to execute NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPoint on NEON.
void quantize_values(int size, qasymm8_t *output, float *input, const QuantizationInfo qinfo)
1 channel, 1 F32 per channel
Basic function to simulate a quantization layer.
void configure(const ITensor *input, const ITensor *bias, ITensor *output, int result_fixedpoint_multiplier, int result_shift, int result_offset_after_shift, int min=std::numeric_limits< int32_t >::lowest(), int max=std::numeric_limits< int32_t >::max())
Initialise the kernel's inputs, output.
Copyright (c) 2017-2020 Arm Limited.
TensorAllocator * allocator()
Return a pointer to the tensor's allocator.
Definition: Tensor.cpp:48
int main(int argc, char **argv)
void fill_random_tensor(T &tensor, float lower_bound, float upper_bound)
Definition: Utils.h:729
1 channel, 1 S32 per channel
Quantization information.
void configure(const ITensor *a, const ITensor *b, const ITensor *c, ITensor *output, const GEMMInfo &gemm_info=GEMMInfo())
Initialise the kernel's inputs, output.
void run() override
Run the kernels contained in the function.
quantized, asymmetric fixed-point 8-bit number unsigned
void allocate() override
Allocate size specified by TensorInfo of CPU memory.
UniformQuantizationInfo uniform() const
Return per layer quantization info.
const std::vector< float > & scale() const
Scale vector accessor.
Basic implementation of the tensor interface.
Definition: Tensor.h:37
void configure(const ITensor *input, ITensor *output)
Set the input and output tensors.
T round(T value)
Round floating-point value with half value rounding away from zero.
const QuantizationInfo qinfo
Definition: Im2Col.cpp:150
uint8_t qasymm8_t
8 bit quantized asymmetric scalar value
uint8_t * buffer() const override
Interface to be implemented by the child class to return a pointer to CPU memory.
Definition: Tensor.cpp:43
Store the tensor's metadata.
Definition: TensorInfo.h:45
void print(std::ostream &s, IOFormatInfo io_fmt=IOFormatInfo()) const
Print a tensor to a given stream using user defined formatting information.
SimpleTensor< T > scale(const SimpleTensor< T > &src, float scale_x, float scale_y, InterpolationPolicy policy, BorderMode border_mode, T constant_border_value, SamplingPolicy sampling_policy, bool ceil_policy_scale, bool align_corners)
Definition: Scale.cpp:185
Status calculate_quantized_multiplier_less_than_one(float multiplier, int32_t *quant_multiplier, int32_t *right_shift, bool ignore_epsilon=false)
Calculate quantized representation of multiplier with value less than one.
Basic function to execute GEMMLowpMatrixMultiplyCore on NEON.
QuantizationInfo choose_quantization_params(float min, float max)
void find_min_max(int size, const float *data, float *min, float *max)