// cl_gemm.cpp — Arm Compute Library 23.11 OpenCL GEMM validation example
// (recovered from a generated-documentation text dump; original line numbers
//  are embedded in the lines below)
1 /*
2  * Copyright (c) 2017-2022 Arm Limited.
3  *
4  * SPDX-License-Identifier: MIT
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a copy
7  * of this software and associated documentation files (the "Software"), to
8  * deal in the Software without restriction, including without limitation the
9  * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10  * sell copies of the Software, and to permit persons to whom the Software is
11  * furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in all
14  * copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22  * SOFTWARE.
23  */
24 #ifndef ARM_COMPUTE_CL /* Needed by Utils.cpp to handle OpenCL exceptions properly */
25 #error "This example needs to be built with -DARM_COMPUTE_CL"
26 #endif /* ARM_COMPUTE_CL */
27 
#include "arm_compute/core/Types.h"
#include "arm_compute/core/Utils.h"
#include "arm_compute/core/utils/quantization/AsymmHelpers.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "arm_compute/runtime/CL/functions/CLGEMM.h"
#include "arm_compute/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.h"
#include "arm_compute/runtime/CL/functions/CLGEMMLowpOutputStage.h"
#include "tests/AssetsLibrary.h"
#include "tests/CL/CLAccessor.h"
#include "tests/Globals.h"
#include "tests/IAccessor.h"
#include "tests/SimpleTensor.h"
#include "tests/validation/Validation.h"
#include "tests/validation/reference/GEMM.h"
#include "tests/validation/reference/GEMMLowp.h"
#include "utils/TypePrinter.h"
#include "utils/Utils.h"
#include "utils/command_line/CommandLineOptions.h"
#include "utils/command_line/CommandLineParser.h"

#include "ValidateExample.h"

#include <cstdlib>
65 
66 using namespace arm_compute;
67 using namespace utils;
68 using namespace arm_compute::test;
69 using namespace arm_compute::test::validation;
70 
71 constexpr float abs_tolerance_f32(0.0001f); /**< F32 Absolute tolerance value for comparing reference's output against implementation's output for
72  * floating point data types in case using relative tolerance fails because of small values */
73 RelativeTolerance<float> tolerance_f32(0.001f); /**< F32 Tolerance value for comparing reference's output against implementation's output for floating point data types */
74 RelativeTolerance<half_float::half> tolerance_f16(half(0.2)); /**< F16 Tolerance value for comparing reference's output against implementation's output for floating point data types */
75 constexpr float tolerance_num_f16 = 0.02f; /**< F16 Tolerance number */
76 
77 namespace
78 {
79 class GEMMCommandLineOptions final
80 {
81 public:
82  explicit GEMMCommandLineOptions(CommandLineParser &parser) noexcept
83  : help(parser.add_option<ToggleOption>("help")),
84  add_bias(parser.add_option<ToggleOption>("add_bias")),
85  M(parser.add_option<SimpleOption<int>>("m", 7)),
86  N(parser.add_option<SimpleOption<int>>("n", 3)),
87  K(parser.add_option<SimpleOption<int>>("k", 5)),
88  B(parser.add_option<SimpleOption<int>>("b", 1)),
89  alpha(parser.add_option<SimpleOption<float>>("alpha", 1.f)),
90  beta(parser.add_option<SimpleOption<float>>("beta", 0.f)),
91  offset_src0(parser.add_option<SimpleOption<int>>("offset_i0", 10)),
92  offset_src1(parser.add_option<SimpleOption<int>>("offset_i1", 10)),
93  offset_dst(parser.add_option<SimpleOption<int>>("offset_o", 10)),
94  scale_src0(parser.add_option<SimpleOption<float>>("scale_i0", 1.f / 255)),
95  scale_src1(parser.add_option<SimpleOption<float>>("scale_i1", 1.f / 255)),
96  scale_dst(parser.add_option<SimpleOption<float>>("scale_o", 1.f / 255)),
97  data_type()
98  {
99  // Setup data type
100  const std::set<arm_compute::DataType> supported_data_types
101  {
105  };
106  data_type = parser.add_option<EnumOption<DataType>>("type", supported_data_types, DataType::F32);
107 
108  // Setup help strings
109  help->set_help("Show this help message");
110  add_bias->set_help("Add bias to the GEMM. Used when running in QASYMM8");
111  M->set_help("M value");
112  N->set_help("N value");
113  K->set_help("K value");
114  B->set_help("B value - number of batches");
115  alpha->set_help("Alpha value");
116  beta->set_help("Beta value");
117  offset_src0->set_help("Offset of first input. Used when running in QASYMM8");
118  offset_src1->set_help("Offset of second input. Used when running in QASYMM8");
119  offset_dst->set_help("Offset of output. Used when running in QASYMM8");
120  scale_src0->set_help("Scale of first input. Used when running in QASYMM8");
121  scale_src1->set_help("Scale of second input. Used when running in QASYMM8");
122  scale_dst->set_help("Scale of output. Used when running in QASYMM8");
123  data_type->set_help("Data type to use");
124  }
125  /** Prevent instances of this class from being copied (As this class contains pointers) */
126  GEMMCommandLineOptions(const GEMMCommandLineOptions &) = delete;
127  /** Prevent instances of this class from being copied (As this class contains pointers) */
128  GEMMCommandLineOptions &operator=(const GEMMCommandLineOptions &) = delete;
129  /** Allow instances of this class to be moved */
130  GEMMCommandLineOptions(GEMMCommandLineOptions &&) noexcept(true) = default;
131  /** Allow instances of this class to be moved */
132  GEMMCommandLineOptions &operator=(GEMMCommandLineOptions &&) noexcept(true) = default;
133  /** Default destructor */
134  ~GEMMCommandLineOptions() = default;
135 
136 public:
137  ToggleOption *help;
138  ToggleOption *add_bias;
139  SimpleOption<int> *M;
140  SimpleOption<int> *N;
141  SimpleOption<int> *K;
142  SimpleOption<int> *B;
143  SimpleOption<float> *alpha;
144  SimpleOption<float> *beta;
145  SimpleOption<int> *offset_src0;
146  SimpleOption<int> *offset_src1;
147  SimpleOption<int> *offset_dst;
148  SimpleOption<float> *scale_src0;
149  SimpleOption<float> *scale_src1;
150  SimpleOption<float> *scale_dst;
151  EnumOption<arm_compute::DataType> *data_type;
152 };
153 } // namespace
154 
155 class CLGEMMValidateExample : public ValidateExample
156 {
157 public:
158  bool do_setup(int argc, char **argv) override
159  {
161 
162  // Parse options
163  CommandLineParser parser;
164  GEMMCommandLineOptions gemm_options(parser);
165  parser.parse(argc, argv);
166 
167  // Print help
168  const bool print_help = gemm_options.help->is_set() ? gemm_options.help->value() : false;
169  if(print_help)
170  {
171  parser.print_help(argv[0]);
172  return false;
173  }
174 
175  // Consume parameters
176  consume_params(gemm_options);
177  print_parameters_internal();
178 
179  const bool is_quantized = is_data_type_quantized(data_type);
180 
181  // Calculate re-quantization parameters
182  if(is_quantized)
183  {
184  float multiplier = scale_src0 * scale_src1 / scale_dst;
185  quantization::calculate_quantized_multiplier(multiplier, &dst_multiplier, &dst_shift);
186  }
187 
188  // Initialize GEMM inputs/outputs
189  src0.allocator()->init(TensorInfo(TensorShape(K, M, B), 1, data_type));
190  src1.allocator()->init(TensorInfo(TensorShape(N, K, B), 1, data_type));
191  src2.allocator()->init(TensorInfo(TensorShape(N, M, B), 1, data_type));
192  init_sgemm_output(dst, src0, src1, data_type);
193 
194  // Configure function
195  if(is_quantized)
196  {
197  src0.info()->set_quantization_info(QuantizationInfo(scale_src0, offset_src0));
198  src1.info()->set_quantization_info(QuantizationInfo(scale_src1, offset_src1));
199  dst.info()->set_quantization_info(QuantizationInfo(scale_dst, offset_dst));
200  biases.allocator()->init(TensorInfo(TensorShape(N), 1, DataType::S32));
201  init_sgemm_output(tmp_dst, src0, src1, DataType::S32);
202 
203  // Configure GEMMlowp matrix multiply function
204  mm_gemmlowp.configure(&src0, &src1, nullptr, &tmp_dst);
205 
206  // Configure GEMMlowp output stage
207  GEMMLowpOutputStageInfo gemm_info{};
208  gemm_info.gemmlowp_multiplier = dst_multiplier;
209  gemm_info.gemmlowp_shift = dst_shift;
210  gemm_info.gemmlowp_offset = offset_dst;
211  mm_gemmlowp_output_stage.configure(&tmp_dst, add_bias ? &biases : nullptr, &dst, gemm_info);
212  tmp_dst.allocator()->allocate();
213  biases.allocator()->allocate();
214  fill(CLAccessor(biases), 3);
215  }
216  else
217  {
218  // Configure matrix multiply function
219  mm_gemm.configure(&src0, &src1, &src2, &dst, alpha, beta);
220  }
221 
222  // Allocate all the tensors
223  src0.allocator()->allocate();
224  src1.allocator()->allocate();
225  dst.allocator()->allocate();
226  src2.allocator()->allocate();
227 
228  fill(CLAccessor(src0), 0);
229  fill(CLAccessor(src1), 1);
230  fill(CLAccessor(src2), 2);
231 
232  return true;
233  }
234 
235  void print_parameters_internal()
236  {
237  std::cout << "Datatype : " << string_from_data_type(data_type) << "\n";
238  std::cout << "M : " << support::cpp11::to_string(M) << "\n";
239  std::cout << "N : " << support::cpp11::to_string(N) << "\n";
240  std::cout << "K : " << support::cpp11::to_string(K) << "\n";
241  std::cout << "B : " << support::cpp11::to_string(B) << "\n";
243  {
244  std::cout << "Scale_Src0 : " << support::cpp11::to_string(scale_src0) << "\n";
245  std::cout << "Offset_Src0 : " << support::cpp11::to_string(offset_src0) << "\n";
246  std::cout << "Scale_Scr1 : " << support::cpp11::to_string(scale_src1) << "\n";
247  std::cout << "Offset_Src1 : " << support::cpp11::to_string(offset_src1) << "\n";
248  std::cout << "Scale_Dst : " << support::cpp11::to_string(scale_dst) << "\n";
249  std::cout << "Offset_Dst : " << support::cpp11::to_string(offset_dst) << "\n";
250  std::cout << "Bias : " << support::cpp11::to_string(add_bias) << "\n";
251  }
252  else
253  {
254  std::cout << "Alpha : " << support::cpp11::to_string(alpha) << "\n";
255  std::cout << "Beta : " << support::cpp11::to_string(beta) << "\n";
256  }
257  }
258 
259  void do_validate() override
260  {
261  switch(data_type)
262  {
263  case DataType::F16:
264  {
265  SimpleTensor<half> ref_src0 = { TensorShape(K, M, B), data_type, 1 };
266  SimpleTensor<half> ref_src1 = { TensorShape(N, K, B), data_type, 1 };
267  SimpleTensor<half> ref_src2 = { TensorShape(N, M, B), data_type, 1 };
268 
269  fill(ref_src0, 0);
270  fill(ref_src1, 1);
271  fill(ref_src2, 2);
272 
273  SimpleTensor<half> ref_dst = reference::gemm<half>(ref_src0, ref_src1, ref_src2, alpha, beta);
275  break;
276  }
277  case DataType::F32:
278  {
279  SimpleTensor<float> ref_src0 = { TensorShape(K, M, B), data_type, 1 };
280  SimpleTensor<float> ref_src1 = { TensorShape(N, K, B), data_type, 1 };
281  SimpleTensor<float> ref_src2 = { TensorShape(N, M, B), data_type, 1 };
282 
283  fill(ref_src0, 0);
284  fill(ref_src1, 1);
285  fill(ref_src2, 2);
286 
287  SimpleTensor<float> ref_dst = reference::gemm<float>(ref_src0, ref_src1, ref_src2, alpha, beta);
289  break;
290  }
291  case DataType::QASYMM8:
292  {
293  SimpleTensor<uint8_t> ref_src0{ TensorShape(K, M, B), data_type, 1 };
294  SimpleTensor<uint8_t> ref_src1{ TensorShape(N, K, B), data_type, 1 };
295  SimpleTensor<uint8_t> ref_dst;
296 
297  // Fill reference
298  fill(ref_src0, 0);
299  fill(ref_src1, 1);
300 
301  SimpleTensor<int32_t> ref_tmp_dst = reference::gemmlowp_matrix_multiply_core<int32_t, uint8_t>(ref_src0, ref_src1, TensorShape(N, M, B), offset_src0, offset_src1);
302 
303  const std::vector<int32_t> dst_multiplier_vec = { dst_multiplier };
304  const std::vector<int32_t> dst_shift_vec = { dst_shift };
305 
306  if(add_bias)
307  {
309  // Fill bias
310  fill(biases, 3);
311  ref_dst = reference::gemmlowp_quantize_down_scale_by_fixedpoint<int32_t, uint8_t>(ref_tmp_dst, biases, dst_multiplier_vec, dst_shift_vec, offset_dst);
312  }
313  else
314  {
315  ref_dst = reference::gemmlowp_quantize_down_scale_by_fixedpoint<int32_t, uint8_t>(ref_tmp_dst, dst_multiplier_vec, dst_shift_vec, offset_dst);
316  }
317  validate(CLAccessor(dst), ref_dst);
318  break;
319  }
320  default:
321  break;
322  }
323  }
324  void do_run() override
325  {
326  // Execute the function
328  {
329  // Run gemmlowp
330  mm_gemmlowp.run();
331  // Run output stage
332  mm_gemmlowp_output_stage.run();
333  }
334  else
335  {
336  // Run gemm
337  mm_gemm.run();
338  }
339 
340  // Make sure all the OpenCL jobs are done executing:
342  }
343 
344 private:
345  template <typename U>
346  void fill(U &&tensor, int i)
347  {
348  switch(tensor.data_type())
349  {
350  case DataType::F16:
351  {
353  library->fill(tensor, distribution, i);
354  break;
355  }
356  case DataType::F32:
357  {
358  std::uniform_real_distribution<float> distribution(-1.0f, 1.0f);
359  library->fill(tensor, distribution, i);
360  break;
361  }
362  case DataType::S32:
363  case DataType::QASYMM8:
364  {
365  std::uniform_int_distribution<> distribution(-6000, 6000);
366  library->fill(tensor, distribution, i);
367  break;
368  }
369  default:
370  library->fill_tensor_uniform(tensor, i);
371  }
372  }
373 
374  void consume_params(const GEMMCommandLineOptions &opts)
375  {
376  ARM_COMPUTE_ERROR_ON(opts.M->value() <= 0);
377  ARM_COMPUTE_ERROR_ON(opts.N->value() <= 0);
378  ARM_COMPUTE_ERROR_ON(opts.K->value() <= 0);
379  ARM_COMPUTE_ERROR_ON(opts.B->value() <= 0);
380  M = opts.M->value();
381  N = opts.N->value();
382  K = opts.K->value();
383  B = opts.B->value();
384  alpha = opts.alpha->value();
385  beta = opts.beta->value();
386  offset_src0 = opts.offset_src0->value();
387  offset_src1 = opts.offset_src1->value();
388  offset_dst = opts.offset_dst->value();
389  scale_src0 = opts.scale_src0->value();
390  scale_src1 = opts.scale_src1->value();
391  scale_dst = opts.scale_dst->value();
392  add_bias = opts.add_bias->is_set() ? opts.add_bias->value() : true;
393  data_type = opts.data_type->value();
394  }
395 
396  CLTensor src0{}, src1{}, src2{}, dst{};
397  CLTensor tmp_dst{}, biases{};
398 
399  CLGEMM mm_gemm{};
400  CLGEMMLowpMatrixMultiplyCore mm_gemmlowp{};
401  CLGEMMLowpOutputStage mm_gemmlowp_output_stage{};
402 
403  size_t M{ 7 }, N{ 3 }, K{ 5 }, B{ 1 };
405  float alpha{ 1.0 }, beta{ 0.0 };
406  int offset_src0{ 10 }, offset_src1{ 10 }, offset_dst{ 10 };
407  float scale_src0{ 1.0f / 255 }, scale_src1{ 1.0f / 255 }, scale_dst{ 1.0f / 255 };
408  int32_t dst_multiplier{ 0 }, dst_shift{ 0 };
409  bool add_bias{ true };
410 };
411 
412 /** Main program for gemm test
413  *
414  * @param[in] argc Number of arguments
415  * @param[in] argv Arguments
416  *
417  */
418 int main(int argc, char **argv)
419 {
420  return utils::run_example<CLGEMMValidateExample>(argc, argv);
421 }
arm_compute::support::cpp11::to_string
std::string to_string(T &&value)
Convert integer and float values to string.
Definition: StringSupport.h:168
ClGemmMatrixMultiplyReshapedKernel.h
arm_compute::utils::init_sgemm_output
void init_sgemm_output(T &dst, T &src0, T &src1, arm_compute::DataType dt)
Definition: Utils.h:807
arm_compute::CLGEMMLowpMatrixMultiplyCore
Basic function to execute GEMMLowpMatrixMultiplyCore on OpenCL.
Definition: CLGEMMLowpMatrixMultiplyCore.h:42
N
unsigned int N
Definition: CpuGemmAssemblyDispatch.cpp:103
ClGemmLowpMatrixMultiplyNativeKernel.h
arm_compute::QuantizationInfo
Quantization information.
Definition: QuantizationInfo.h:67
arm_compute::GEMMLowpOutputStageInfo::gemmlowp_multiplier
int32_t gemmlowp_multiplier
GEMMLowp output stage multiplier used for quantizing to QASYMM8.
Definition: GEMMInfo.h:49
arm_compute::test
Definition: tests.dox:26
CommandLineOptions.h
AssetsLibrary.h
arm_compute::GEMMLowpOutputStageInfo
GEMMLowp output stage info.
Definition: GEMMInfo.h:45
ClGemmLowpOffsetContributionKernel.h
arm_compute::DataType::QASYMM8
@ QASYMM8
quantized, asymmetric fixed-point 8-bit number unsigned
arm_compute::TensorShape
Shape of a tensor.
Definition: TensorShape.h:39
arm_compute::test::validation::dst
auto dst
Definition: DFT.cpp:170
arm_compute::CLScheduler::sync
void sync()
Blocks until all commands in the associated command queue have finished.
Definition: CLScheduler.cpp:71
tolerance_f16
RelativeTolerance< half_float::half > tolerance_f16(half(0.2))
F16 Tolerance value for comparing reference's output against implementation's output for floating poi...
Types.h
ClGemmLowpOffsetContributionOutputStageKernel.h
SimpleTensor.h
ClGemmLowpMatrixMultiplyReshapedOnlyRhsKernel.h
ClCastKernel.h
arm_compute::CLTensor
Basic implementation of the OpenCL tensor interface.
Definition: CLTensor.h:41
arm_compute::string_from_data_type
const std::string & string_from_data_type(DataType dt)
Convert a data type identity into a string.
Definition: DataTypeUtils.cpp:31
GemmTuner.parser
parser
Definition: GemmTuner.py:640
CLGEMM.h
CLGEMMLowpOutputStage.h
CLAccessor.h
ClGemmMatrixMultiplyReshapedOnlyRhsKernel.h
arm_compute::half
half_float::half half
16-bit floating point type
Definition: CoreTypes.h:36
ClGemmLowpReductionKernel.h
ARM_COMPUTE_ERROR_ON
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
Definition: Error.h:466
arm_compute::CLGEMMLowpOutputStage
Basic function to execute GEMMLowpQuantizeDown kernels on CL.
Definition: CLGEMMLowpOutputStage.h:56
ClWeightsReshapeKernel.h
TypePrinter.h
arm_compute::test::CLAccessor
Accessor implementation for CLTensor objects.
Definition: CLAccessor.h:36
IAccessor.h
arm_compute::Channel::B
@ B
Blue channel.
CLFillBorderKernel.h
arm_compute::test::validation::fill
library fill(src, distribution, 0)
CommandLineParser.h
CLScheduler.h
Interface to enqueue OpenCL kernels and get/set the OpenCL CommandQueue and ICLTuner.
arm_compute::test::validation
Definition: AbsLayer.cpp:41
K
unsigned int K
Definition: CpuGemmAssemblyDispatch.cpp:104
arm_compute::CLGEMM
Basic function to execute GEMM on OpenCL.
Definition: CLGEMM.h:45
GEMM.h
ClGemmReshapeRhsMatrixKernel.h
tensor
CLTensor * tensor
Pointer to the auxiliary tensor.
Definition: ClWorkloadRuntime.cpp:67
arm_compute::CLScheduler::get
static CLScheduler & get()
Access the scheduler singleton.
Definition: CLScheduler.cpp:112
arm_compute::test::validation::data_type
data_type
Definition: Cast.cpp:222
Validation.h
arm_compute::test::library
std::unique_ptr< AssetsLibrary > library
Definition: main.cpp:77
ClGemmReshapeLhsMatrixKernel.h
AsymmHelpers.h
abs_tolerance_f32
constexpr float abs_tolerance_f32(0.0001f)
F32 Absolute tolerance value for comparing reference's output against implementation's output for flo...
Globals.h
Utils.h
arm_compute::test::SimpleTensor
Simple tensor object that stores elements in a consecutive chunk of memory.
Definition: SimpleTensor.h:58
arm_compute::TensorInfo
Store the tensor's metadata.
Definition: TensorInfo.h:41
CLGEMMLowpMatrixMultiplyCore.h
arm_compute::CLScheduler::default_init
void default_init(ICLTuner *cl_tuner=nullptr, CLGEMMHeuristicsHandle *gemm_h=nullptr, CLBackendType cl_backend_type=CLBackendType::Native)
Initialises the context and command queue used by the scheduler to default values and sets a default ...
Definition: CLScheduler.cpp:134
M
unsigned int M
Definition: CpuGemmAssemblyDispatch.cpp:102
arm_compute
Copyright (c) 2017-2023 Arm Limited.
Definition: introduction.dox:24
tolerance_f32
RelativeTolerance< float > tolerance_f32(0.001f)
F32 Tolerance value for comparing reference's output against implementation's output for floating poi...
arm_compute::DataType::F16
@ F16
16-bit floating-point number
tolerance_num_f16
constexpr float tolerance_num_f16
F16 Tolerance number.
Definition: cl_gemm.cpp:75
ClIm2ColKernel.h
arm_compute::DataType::S32
@ S32
signed 32-bit number
arm_compute::test::validation::distribution
std::uniform_real_distribution< float > distribution(-5.f, 5.f)
arm_compute::quantization::calculate_quantized_multiplier
Status calculate_quantized_multiplier(float multiplier, int32_t *quant_multiplier, int32_t *shift, bool ignore_epsilon=false)
Calculate quantized representation of multiplier.
Definition: AsymmHelpers.cpp:43
arm_compute::validate
Status validate(const ITensorInfo *scores_in, const ITensorInfo *boxes_in, const ITensorInfo *batch_splits_in, const ITensorInfo *scores_out, const ITensorInfo *boxes_out, const ITensorInfo *classes, const ITensorInfo *batch_splits_out, const ITensorInfo *keeps, const ITensorInfo *keeps_size, const BoxNMSLimitInfo info)
Definition: CPPBoxWithNonMaximaSuppressionLimit.cpp:243
arm_compute::is_data_type_quantized
bool is_data_type_quantized(DataType dt)
Check if a given data type is of quantized type.
Definition: DataTypeUtils.h:324
ValidateExample.h
arm_compute::DataType::F32
@ F32
32-bit floating-point number
main
int main(int argc, char **argv)
Main program for gemm test.
Definition: cl_gemm.cpp:418
arm_compute::test::validation::RelativeTolerance
Class reprensenting a relative tolerance value.
Definition: Validation.h:97
GEMMLowp.h
arm_compute::DataType
DataType
Available data types.
Definition: CoreTypes.h:83
GemmTuner.help
help
Definition: GemmTuner.py:648
arm_compute::utils::uniform_real_distribution_16bit
Specialized class to generate random non-zero FP16 values.
Definition: Utils.h:255