Compute Library 21.05
CLGEMMLowpMatrixMultiplyCore.cpp
/*
 * Copyright (c) 2017-2021 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.h"

#include "arm_compute/core/CL/CLKernelLibrary.h"
#include "arm_compute/core/CL/ICLTensor.h"
#include "arm_compute/core/Error.h"
#include "arm_compute/core/KernelDescriptors.h"
#include "arm_compute/core/Log.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/core/Validate.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "src/core/CL/kernels/CLDepthConvertLayerKernel.h"
#include "src/core/CL/kernels/CLGEMMLowpMatrixMultiplyNativeKernel.h"
#include "src/core/CL/kernels/CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel.h"
#include "src/core/CL/kernels/CLGEMMLowpOffsetContributionKernel.h"
#include "src/core/CL/kernels/CLGEMMLowpOffsetContributionOutputStageKernel.h"
#include "src/core/CL/kernels/CLGEMMLowpReductionKernel.h"
#include "src/core/CL/kernels/CLGEMMReshapeRHSMatrixKernel.h"
#include "src/core/helpers/AutoConfiguration.h"
#include "src/runtime/CL/gemm_auto_heuristics/CLGEMMAutoHeuristics.h"
#include "utils/TypePrinter.h"

namespace arm_compute
{
using namespace arm_compute::misc::shape_calculator;
using namespace arm_compute::cl_gemm;

namespace
{
inline bool validate_gemm_kernel(CLGEMMKernelType kernel_type)
{
    switch(kernel_type)
    {
        case CLGEMMKernelType::NATIVE:
        case CLGEMMKernelType::RESHAPED_ONLY_RHS:
        {
            return true;
        }
        default:
        {
            return false;
        }
    }
}
// Automatically select between mlgo (prioritized) and default heuristics for the GEMM kernel type
inline CLGEMMKernelType auto_select_gemm_kernel(auto_heuristics::CommonQuery query, bool reshape_b_only_on_first_run)
{
    auto gemm_kernel = auto_heuristics::select_mlgo_gemm_kernel(query, reshape_b_only_on_first_run);
    if(bool(gemm_kernel))
    {
        if(validate_gemm_kernel(gemm_kernel.gemm_type))
        {
            ARM_COMPUTE_LOG_INFO_MSG_WITH_FORMAT_CORE("Use gemm kernel from mlgo heuristics: %s.", to_string(gemm_kernel.gemm_type).c_str());
            return gemm_kernel.gemm_type;
        }
    }
    gemm_kernel = auto_heuristics::select_default_gemm_kernel(query, reshape_b_only_on_first_run);
    ARM_COMPUTE_LOG_INFO_MSG_WITH_FORMAT_CORE("Use gemm kernel from default heuristics: %s.", to_string(gemm_kernel.gemm_type).c_str());
    return gemm_kernel.gemm_type;
}
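
// Example (illustrative; the query values are made up for the sketch): picking the
// kernel type for a 256x128 output with K = 64 on the current target. The mlgo result
// is used only if present and valid, otherwise the default heuristics decide:
//
//   const auto query = auto_heuristics::CommonQuery{ CLScheduler::get().target(), DataType::QASYMM8,
//                                                    256 /* m */, 128 /* n */, 64 /* k */, 1 /* batch_size */ };
//   const CLGEMMKernelType kernel_type = auto_select_gemm_kernel(query, /* reshape_b_only_on_first_run */ true);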
// Validate lhs_info and rhs_info for the native kernel
inline bool validate_lhs_rhs_info_native(const GEMMLHSMatrixInfo &lhs_info, const GEMMRHSMatrixInfo &rhs_info, const ITensorInfo *a, const ITensorInfo *b, const GEMMReshapeInfo &reshape_info)
{
    // Validate GEMMLHSMatrixInfo and GEMMRHSMatrixInfo for the native kernel
    TensorInfo mm_result_s32_info{};
    // Output tensor auto initialization if not yet initialized
    auto_init_if_empty(mm_result_s32_info, a->clone()->set_tensor_shape(compute_mm_shape(*a, *b, false, reshape_info)).set_data_type(DataType::S32));
    // Validate mm kernel
    // NOTE: Ignore all other parameters (e.g. output stage etc.) and only validate lhs and rhs info
    // NOTE: This assumes:
    // 1. lhs and rhs info's validity does not depend on these other parameters and vice versa (in CLGEMMLowpMatrixMultiplyNativeKernel.cpp validate_arguments).
    // 2. lhs and rhs info does not cause window and padding issues through side effects (in CLGEMMLowpMatrixMultiplyNativeKernel.cpp validate_and_configure_window).
    if(!bool(CLGEMMLowpMatrixMultiplyNativeKernel::validate(a, b, &mm_result_s32_info, lhs_info, rhs_info, reshape_info)))
    {
        return false;
    }
    return true;
}

// Automatically select between mlgo (prioritized) and default heuristics for native kernel configs
std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> auto_select_gemm_config_native(auto_heuristics::CommonQuery query, const ITensorInfo *a, const ITensorInfo *b, const GEMMReshapeInfo &reshape_info)
{
    auto config = auto_heuristics::select_mlgo_gemm_config_native(query);
    if(config)
    {
        if(validate_lhs_rhs_info_native(config.lhs_info, config.rhs_info, a, b, reshape_info))
        {
            ARM_COMPUTE_LOG_INFO_MSG_WITH_FORMAT_CORE("Use native config from mlgo heuristics: LHS info: %s ; RHS info: %s ", to_string(config.lhs_info).c_str(), to_string(config.rhs_info).c_str());
            return { config.lhs_info, config.rhs_info };
        }
    }
    config = auto_heuristics::select_default_gemm_config_native(query);
    ARM_COMPUTE_LOG_INFO_MSG_WITH_FORMAT_CORE("Use native config from default heuristics: LHS info: %s ; RHS info: %s ", to_string(config.lhs_info).c_str(), to_string(config.rhs_info).c_str());
    return { config.lhs_info, config.rhs_info };
}

// Validate lhs_info and rhs_info for reshaped only rhs kernel
inline bool validate_lhs_rhs_info_reshaped_only_rhs(const GEMMLHSMatrixInfo &lhs_info, const GEMMRHSMatrixInfo &rhs_info, const ITensorInfo *a, const ITensorInfo *b, const ITensorInfo *output,
                                                    unsigned int m, unsigned int n, unsigned int k, bool reinterpret_input_as_3d, int depth_output_gemm3d)
{
    // Validate GEMMLHSMatrixInfo and GEMMRHSMatrixInfo for reshaped only rhs kernel
    TensorInfo tmp_b_info{};
    // Validate reshape RHS kernel
    auto_init_if_empty(tmp_b_info, b->clone()->set_tensor_shape(compute_rhs_reshaped_shape(*b, rhs_info)));
    if(!bool(CLGEMMReshapeRHSMatrixKernel::validate(b, &tmp_b_info, rhs_info)))
    {
        return false;
    }
    // Validate mm kernel
    // NOTE: Ignore all other parameters (e.g. depth_output_gemm3d, output stage etc.) and only validate lhs and rhs info
    // NOTE: This assumes:
    // 1. lhs and rhs info's validity does not depend on these other parameters and vice versa (in CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel.cpp validate_arguments).
    // 2. lhs and rhs info does not cause window and padding issues through side effects (in CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel.cpp validate_and_configure_window).
    GEMMKernelInfo gemm_kernel_info;
    gemm_kernel_info.m                       = m;
    gemm_kernel_info.n                       = n;
    gemm_kernel_info.k                       = k;
    gemm_kernel_info.reinterpret_input_as_3d = reinterpret_input_as_3d;
    gemm_kernel_info.depth_output_gemm3d     = depth_output_gemm3d;
    gemm_kernel_info.lhs_info                = lhs_info;
    gemm_kernel_info.rhs_info                = rhs_info;
    // Since we ignore the output stage, the output data type has to be S32 to pass the validation
    TensorInfo output_info_copy(*output);
    output_info_copy.set_data_type(DataType::S32);
    if(!bool(CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel::validate(a, &tmp_b_info, &output_info_copy, gemm_kernel_info)))
    {
        return false;
    }
    return true;
}

// Automatically select between mlgo (prioritized) and default heuristics for reshaped only rhs kernel configs
std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> auto_select_gemm_config_reshaped_only_rhs(auto_heuristics::CommonQuery query, bool reinterpret_input_as_3d, int depth_output_gemm3d,
                                                                                          const ITensorInfo *a,
                                                                                          const ITensorInfo *b, const ITensorInfo *output)
{
    auto config = auto_heuristics::select_mlgo_gemm_config_reshaped_only_rhs(query);
    if(config)
    {
        if(validate_lhs_rhs_info_reshaped_only_rhs(config.lhs_info, config.rhs_info, a, b, output, query.m, query.n, query.k, reinterpret_input_as_3d, depth_output_gemm3d))
        {
            ARM_COMPUTE_LOG_INFO_MSG_WITH_FORMAT_CORE("Use reshaped_only_rhs config from mlgo heuristics: LHS info: %s ; RHS info: %s ", to_string(config.lhs_info).c_str(), to_string(config.rhs_info).c_str());
            return { config.lhs_info, config.rhs_info };
        }
    }
    config = auto_heuristics::select_default_gemm_config_reshaped_only_rhs(query);
    ARM_COMPUTE_LOG_INFO_MSG_WITH_FORMAT_CORE("Use reshaped_only_rhs config from default heuristics: LHS info: %s ; RHS info: %s ", to_string(config.lhs_info).c_str(), to_string(config.rhs_info).c_str());
    return { config.lhs_info, config.rhs_info };
}
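
// For reference, the returned pair is a plain block-layout description consumed by the
// reshape and matrix multiply kernels. A sketch of the fields involved (the values below
// are illustrative, not a tuned configuration):
//
//   GEMMLHSMatrixInfo lhs_info;
//   lhs_info.m0 = 4;             // rows processed per work-item
//   lhs_info.k0 = 16;            // accumulations per iteration
//   GEMMRHSMatrixInfo rhs_info;
//   rhs_info.n0 = 4;             // columns per block of the reshaped RHS
//   rhs_info.k0 = 16;            // has to match lhs_info.k0
//   rhs_info.h0 = 8;             // number of horizontal blocks
//   rhs_info.transpose  = true;  // store each block transposed
//   rhs_info.interleave = true;  // interleave the blocks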

inline bool is_gemm_reshaped(CLGEMMKernelType kernel_type)
{
    switch(kernel_type)
    {
        case CLGEMMKernelType::NATIVE:
            return false;
        case CLGEMMKernelType::RESHAPED_ONLY_RHS:
            return true;
        default:
            ARM_COMPUTE_ERROR("Unsupported gemmlowp kernel!");
    }
}
} // namespace

CLGEMMLowpMatrixMultiplyCore::CLGEMMLowpMatrixMultiplyCore(std::shared_ptr<IMemoryManager> memory_manager)
    : _memory_group(std::move(memory_manager)),
      _weights_to_qasymm8(std::make_unique<CLDepthConvertLayerKernel>()),
      _mm_native_kernel(std::make_unique<CLGEMMLowpMatrixMultiplyNativeKernel>()),
      _mm_reshaped_only_rhs_kernel(std::make_unique<CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel>()),
      _mtx_b_reshape_kernel(std::make_unique<CLGEMMReshapeRHSMatrixKernel>()),
      _mtx_a_reduction_kernel(std::make_unique<CLGEMMLowpMatrixAReductionKernel>()),
      _mtx_b_reduction_kernel(std::make_unique<CLGEMMLowpMatrixBReductionKernel>()),
      _offset_contribution_kernel(std::make_unique<CLGEMMLowpOffsetContributionKernel>()),
      _offset_contribution_output_stage_kernel(std::make_unique<CLGEMMLowpOffsetContributionOutputStageKernel>()),
      _qasymm8_weights(),
      _vector_sum_col(),
      _vector_sum_row(),
      _tmp_b(),
      _mm_result_s32(),
      _gemm_output_stage_multipliers(),
      _gemm_output_stage_shifts(),
      _matrix_a(nullptr),
      _original_b(nullptr),
      _output(nullptr),
      _a_offset(0),
      _b_offset(0),
      _is_gemm_reshaped(true),
      _reshape_b_only_on_first_run(false),
      _is_prepared(false),
      _run_output_stage(false),
      _convert_to_qasymm8(false),
      _run_offset_contribution(false)
{
}

CLGEMMLowpMatrixMultiplyCore::~CLGEMMLowpMatrixMultiplyCore() = default;

void CLGEMMLowpMatrixMultiplyCore::configure(const ICLTensor *a, const ICLTensor *b, const ICLTensor *c, ICLTensor *output, const GEMMInfo &gemm_info)
{
    configure(CLKernelLibrary::get().get_compile_context(), a, b, c, output, gemm_info);
}
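
// Example usage (illustrative sketch, not part of the library source; it assumes the CL
// backend was initialised, e.g. with CLScheduler::get().default_init(), and that the
// shapes and quantization below are valid for the target):
//
//   CLTensor a{}, b{}, dst{};
//   a.allocator()->init(TensorInfo(TensorShape(32U, 16U), 1, DataType::QASYMM8, QuantizationInfo(0.5f, 10)));  // [K=32, M=16]
//   b.allocator()->init(TensorInfo(TensorShape(8U, 32U), 1, DataType::QASYMM8, QuantizationInfo(0.25f, 5)));   // [N=8,  K=32]
//   dst.allocator()->init(TensorInfo(TensorShape(8U, 16U), 1, DataType::S32));                                 // [N=8,  M=16]
//
//   CLGEMMLowpMatrixMultiplyCore gemmlowp;
//   gemmlowp.configure(&a, &b, nullptr, &dst, GEMMInfo());
//
//   a.allocator()->allocate();
//   b.allocator()->allocate();
//   dst.allocator()->allocate();
//   // ... fill a and b with quantized data ...
//   gemmlowp.run();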

void CLGEMMLowpMatrixMultiplyCore::configure(const CLCompileContext &compile_context, const ICLTensor *a, const ICLTensor *b, const ICLTensor *c, ICLTensor *output, const GEMMInfo &gemm_info)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(a, b, output);
    ARM_COMPUTE_ERROR_THROW_ON(CLGEMMLowpMatrixMultiplyCore::validate(a->info(), b->info(), c != nullptr ? c->info() : nullptr, output->info(), gemm_info));

    _is_prepared                 = false;
    _original_b                  = b;
    _reshape_b_only_on_first_run = gemm_info.reshape_b_only_on_first_run();
    _a_offset                    = a->info()->quantization_info().uniform().offset;
    _matrix_a                    = a;
    _output                      = output;

    _convert_to_qasymm8 = is_data_type_quantized_per_channel(b->info()->data_type()) && is_data_type_quantized_symmetric(b->info()->data_type())
                          && a->info()->data_type() == DataType::QASYMM8;
    _b_offset = _convert_to_qasymm8 ? -128 : b->info()->quantization_info().uniform().offset;

    // Get the GPU target
    const GPUTarget gpu_target = CLScheduler::get().target();

    // Set the target for the kernels
    _mm_native_kernel->set_target(gpu_target);
    _mm_reshaped_only_rhs_kernel->set_target(gpu_target);

    GEMMRHSMatrixInfo rhs_info;
    GEMMLHSMatrixInfo lhs_info;

    // Arguments used by GEMMReshapeInfo
    // If we pass the matrix A and matrix B reshaped to the matrix multiply kernel, we need to pass m, n, k, mult_transpose1xW_width and mult_interleave4x4_height to GEMMReshapeInfo
    // in order to know how the matrices have been reshaped
    bool               reinterpret_input_as_3d = gemm_info.reinterpret_input_as_3d();
    const unsigned int m                       = reinterpret_input_as_3d ? (a->info()->dimension(1) * a->info()->dimension(2)) : a->info()->dimension(1);
    const unsigned int n                       = b->info()->dimension(0);
    const unsigned int k                       = a->info()->dimension(0);
    const unsigned int batch_size              = reinterpret_input_as_3d ? a->info()->dimension(3) : a->info()->dimension(2);
    const int          depth_output_gemm3d     = gemm_info.depth_output_gemm3d();

    const auto reshape_info = GEMMReshapeInfo(m, n, k, 1, 1, depth_output_gemm3d, reinterpret_input_as_3d);
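    // Worked example: for an input a of shape [64, 8, 4, 2] (dimensions 0..3) with
    // reinterpret_input_as_3d == true, the GEMM sees m = 8 * 4 = 32, k = 64 and
    // batch_size = 2; with reinterpret_input_as_3d == false it sees m = 8 and
    // batch_size = 4 (dimension 2) instead.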

    // Check if we need to reshape the matrix A and matrix B
    _is_gemm_reshaped = is_gemm_reshaped(auto_select_gemm_kernel(auto_heuristics::CommonQuery{ gpu_target, a->info()->data_type(), m, n, k, batch_size }, _reshape_b_only_on_first_run));

    if(_convert_to_qasymm8)
    {
        // Set data type for converted weights
        TensorInfo weights_info(*b->info());
        weights_info.set_data_type(DataType::QASYMM8);
        _qasymm8_weights.allocator()->init(weights_info);
        _weights_to_qasymm8->configure(compile_context, b, &_qasymm8_weights, ConvertPolicy::WRAP, 0);
    }

    const ICLTensor *matrix_b = _convert_to_qasymm8 ? &_qasymm8_weights : b;
    if(_is_gemm_reshaped)
    {
        matrix_b = &_tmp_b;

        if(!_reshape_b_only_on_first_run)
        {
            _memory_group.manage(&_tmp_b);
        }

        // Pick up the GEMM configuration
        // It doesn't matter whether the data type is DataType::QASYMM8 or DataType::QASYMM8_SIGNED, since it only affects the shape configuration
        std::tie(lhs_info, rhs_info) = auto_select_gemm_config_reshaped_only_rhs(auto_heuristics::CommonQuery{ gpu_target, DataType::QASYMM8, m, n, k, batch_size }, reinterpret_input_as_3d,
                                                                                 depth_output_gemm3d,
                                                                                 a->info(), _convert_to_qasymm8 ? _qasymm8_weights.info() : b->info(), output->info());

        // Configure reshape RHS kernel
        _mtx_b_reshape_kernel->configure(compile_context, _convert_to_qasymm8 ? &_qasymm8_weights : b, &_tmp_b, rhs_info);
    }

    // Using default reduction info
    const GEMMLowpReductionKernelInfo reduction_info{};

    // Initialize matrix B reduction kernel only if _a_offset is not equal to 0
    if(_a_offset != 0)
    {
        TensorInfo info_vector_sum_col(compute_reductionA_shape(*b->info()), 1, DataType::S32);
        _vector_sum_col.allocator()->init(info_vector_sum_col);
        if(!_reshape_b_only_on_first_run)
        {
            _memory_group.manage(&_vector_sum_col);
        }

        // Configure Matrix B reduction kernel
        _mtx_b_reduction_kernel->configure(compile_context, _convert_to_qasymm8 ? &_qasymm8_weights : b, &_vector_sum_col, reduction_info);
    }

    // Initialize Matrix A reduction kernel only if _b_offset is not equal to 0
    if(_b_offset != 0)
    {
        TensorInfo info_vector_sum_row(compute_reductionB_shape(*a->info()), 1, DataType::S32);
        _vector_sum_row.allocator()->init(info_vector_sum_row);
        _memory_group.manage(&_vector_sum_row);

        // Configure matrix A reduction kernel
        _mtx_a_reduction_kernel->configure(compile_context, a, &_vector_sum_row, reduction_info);
    }
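    // For reference, the two reductions come from expanding the quantized product
    // (a[i][l] - a_offset) * (b[l][j] - b_offset) over the accumulation dimension:
    //
    //   sum_l a[i][l] * b[l][j]         -> the S32 matrix multiplication result
    //   - a_offset * sum_l b[l][j]      -> needs the column sums of B (_vector_sum_col)
    //   - b_offset * sum_l a[i][l]      -> needs the row sums of A (_vector_sum_row)
    //   + k * a_offset * b_offset       -> constant term
    //
    // which is why _vector_sum_col is only required when _a_offset != 0 and
    // _vector_sum_row only when _b_offset != 0.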

    GEMMKernelInfo gemm_kernel_info;
    gemm_kernel_info.m                       = m;
    gemm_kernel_info.n                       = n;
    gemm_kernel_info.k                       = k;
    gemm_kernel_info.depth_output_gemm3d     = depth_output_gemm3d;
    gemm_kernel_info.reinterpret_input_as_3d = reinterpret_input_as_3d;
    gemm_kernel_info.lhs_info                = lhs_info;
    gemm_kernel_info.rhs_info                = rhs_info;
    gemm_kernel_info.a_offset                = _a_offset;
    gemm_kernel_info.b_offset                = _b_offset;
    // If GEMMLowpOutputStage != NONE, fuse the offset contribution with the output stage
    if(gemm_info.gemmlowp_output_stage().type != GEMMLowpOutputStageType::NONE)
    {
        // Configure offset contribution kernel
        const size_t num_filters = (gemm_info.gemmlowp_output_stage().is_quantized_per_channel) ? gemm_info.gemmlowp_output_stage().gemmlowp_multipliers.size() : 1;

        _gemm_output_stage_multipliers.allocator()->init(TensorInfo(TensorShape(num_filters), 1, DataType::S32));
        _gemm_output_stage_shifts.allocator()->init(TensorInfo(TensorShape(num_filters), 1, DataType::S32));

        GEMMLowpOutputStageInfo gemmlowp_output_stage = gemm_info.gemmlowp_output_stage();
        gemmlowp_output_stage.output_data_type        = _matrix_a->info()->data_type();

        gemm_kernel_info.output_stage = gemmlowp_output_stage;

        if(_is_gemm_reshaped && gemmlowp_output_stage.type == GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT)
        {
            // Configure and tune matrix multiply kernel with fused output stage
            _mm_reshaped_only_rhs_kernel->configure(compile_context, _matrix_a, matrix_b, output, gemm_kernel_info, _a_offset == 0 ? nullptr : &_vector_sum_col,
                                                    _b_offset == 0 ? nullptr : &_vector_sum_row, c, &_gemm_output_stage_multipliers, &_gemm_output_stage_shifts);
        }
        else
        {
            _run_output_stage = true;

            _memory_group.manage(&_mm_result_s32);

            if(_is_gemm_reshaped)
            {
                _mm_reshaped_only_rhs_kernel->configure(compile_context, _matrix_a, matrix_b, &_mm_result_s32, gemm_kernel_info);
            }
            else
            {
                // Pick up the GEMM configuration
                // It doesn't matter whether the data type is DataType::QASYMM8 or DataType::QASYMM8_SIGNED, since it only affects the shape configuration
                std::tie(lhs_info, rhs_info) = auto_select_gemm_config_native(auto_heuristics::CommonQuery{ gpu_target, DataType::QASYMM8, m, n, k, batch_size },
                                                                              _matrix_a->info(), _convert_to_qasymm8 ? _qasymm8_weights.info() : matrix_b->info(), reshape_info);

                // Configure matrix multiply kernel
                _mm_native_kernel->configure(compile_context, _matrix_a, matrix_b, &_mm_result_s32, lhs_info, rhs_info, reshape_info);

                _offset_contribution_output_stage_kernel->configure(compile_context, &_mm_result_s32, _a_offset == 0 ? nullptr : &_vector_sum_col, _b_offset == 0 ? nullptr : &_vector_sum_row, c, output,
                                                                    a->info()->dimension(0),
                                                                    _a_offset, _b_offset, gemmlowp_output_stage, &_gemm_output_stage_multipliers, &_gemm_output_stage_shifts);
                _mm_result_s32.allocator()->allocate();
            }
        }

        _gemm_output_stage_multipliers.allocator()->allocate();
        _gemm_output_stage_shifts.allocator()->allocate();
        // Compute GEMM output multipliers and shifts for output stage
        _gemm_output_stage_multipliers.map();
        _gemm_output_stage_shifts.map();
        std::memcpy(_gemm_output_stage_multipliers.ptr_to_element(Coordinates(0)), gemm_info.gemmlowp_output_stage().gemmlowp_multipliers.data(), num_filters * sizeof(int32_t));
        std::memcpy(_gemm_output_stage_shifts.ptr_to_element(Coordinates(0)), gemm_info.gemmlowp_output_stage().gemmlowp_shifts.data(), num_filters * sizeof(int32_t));
        _gemm_output_stage_multipliers.unmap();
        _gemm_output_stage_shifts.unmap();
    }
    else
    {
        _run_offset_contribution = true;
        if(_is_gemm_reshaped)
        {
            // Configure and tune matrix multiply kernel
            _mm_reshaped_only_rhs_kernel->configure(compile_context, _matrix_a, matrix_b, output, gemm_kernel_info);
        }
        else
        {
            // Pick up the GEMM configuration
            // It doesn't matter whether the data type is DataType::QASYMM8 or DataType::QASYMM8_SIGNED, since it only affects the shape configuration
            std::tie(lhs_info, rhs_info) = auto_select_gemm_config_native(auto_heuristics::CommonQuery{ gpu_target, DataType::QASYMM8, m, n, k, batch_size },
                                                                          a->info(), _convert_to_qasymm8 ? _qasymm8_weights.info() : b->info(), reshape_info);

            // Configure matrix multiply kernel
            _mm_native_kernel->configure(compile_context, _matrix_a, matrix_b, output, lhs_info, rhs_info, reshape_info);
        }

        // Configure offset contribution kernel
        _offset_contribution_kernel->configure(compile_context, output, _a_offset == 0 ? nullptr : &_vector_sum_col, _b_offset == 0 ? nullptr : &_vector_sum_row, c, a->info()->dimension(0), _a_offset,
                                               _b_offset);
    }
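    // For reference (gemmlowp-style fixed-point requantization; the numbers are just a
    // worked example): for QUANTIZE_DOWN_FIXEDPOINT each real rescale factor
    // s = (a_scale * b_scale) / output_scale is stored as an integer multiplier in
    // [2^30, 2^31) plus a right shift, with s ~= multiplier * 2^-(31 + shift).
    // e.g. s = 0.0008 = 0.8192 * 2^-10 gives multiplier = round(0.8192 * 2^31) = 1759218604
    // and shift = 10, since 1759218604 * 2^-41 ~= 0.0008.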

    // Allocate tensors
    if(_is_gemm_reshaped)
    {
        if(!_reshape_b_only_on_first_run)
        {
            _tmp_b.allocator()->allocate();
        }
    }

    if(_a_offset != 0 && !_reshape_b_only_on_first_run)
    {
        _vector_sum_col.allocator()->allocate();
    }

    if(_b_offset != 0)
    {
        _vector_sum_row.allocator()->allocate();
    }
}

Status CLGEMMLowpMatrixMultiplyCore::validate(const ITensorInfo *a, const ITensorInfo *b, const ITensorInfo *c, const ITensorInfo *output, const GEMMInfo &gemm_info)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(a, b, output);
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(a, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED);
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(b, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::QSYMM8, DataType::QSYMM8_PER_CHANNEL);
    ARM_COMPUTE_RETURN_ERROR_ON(a->data_type() == DataType::QASYMM8 && b->data_type() == DataType::QASYMM8_SIGNED);
    ARM_COMPUTE_RETURN_ERROR_ON(a->data_type() == DataType::QASYMM8_SIGNED && b->data_type() == DataType::QASYMM8);
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(gemm_info.is_a_reshaped(), "Matrix A already reshaped is not supported");
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(gemm_info.is_b_reshaped(), "Matrix B already reshaped is not supported");

    int32_t a_offset = a->quantization_info().uniform().offset;
    int32_t b_offset = b->quantization_info().uniform().offset;

    const ITensorInfo *matrix_a_info = a;

    TensorInfo        tmp_b_info{};
    GEMMRHSMatrixInfo rhs_info;
    GEMMLHSMatrixInfo lhs_info;

    // Get the GPU target
    const GPUTarget gpu_target = CLScheduler::get().target();

    bool               reinterpret_input_as_3d = gemm_info.reinterpret_input_as_3d();
    const unsigned int m                       = reinterpret_input_as_3d ? (a->dimension(1) * a->dimension(2)) : a->dimension(1);
    const unsigned int n                       = b->dimension(0);
    const unsigned int k                       = a->dimension(0);
    const unsigned int batch_size              = reinterpret_input_as_3d ? a->dimension(3) : a->dimension(2);
    const int          depth_output_gemm3d     = gemm_info.depth_output_gemm3d();

    bool reshape_matrix_b = is_gemm_reshaped(auto_select_gemm_kernel(auto_heuristics::CommonQuery{ gpu_target, a->data_type(), m, n, k, batch_size }, gemm_info.reshape_b_only_on_first_run()));

    const GEMMReshapeInfo reshape_info = GEMMReshapeInfo(m, n, k, 1, 1, depth_output_gemm3d, reinterpret_input_as_3d);

    bool convert_to_qasymm8 = is_data_type_quantized_per_channel(b->data_type()) && is_data_type_quantized_symmetric(b->data_type())
                              && is_data_type_quantized_asymmetric(a->data_type());
    TensorInfo weights_info(*b);
    if(convert_to_qasymm8)
    {
        b_offset = -128;
        weights_info.set_data_type(DataType::QASYMM8);
        ARM_COMPUTE_RETURN_ON_ERROR(CLDepthConvertLayerKernel::validate(b, &weights_info, ConvertPolicy::WRAP, 0));
    }
    const ITensorInfo *matrix_b_info = &weights_info;
    if(reshape_matrix_b)
    {
        matrix_b_info = &tmp_b_info;

        // Pick up the GEMM configuration
        // NOTE: No need to validate mlgo configurations as they automatically fall back to default heuristics if validation fails
        // It doesn't matter whether the data type is DataType::QASYMM8 or DataType::QASYMM8_SIGNED, since it only affects the shape configuration
        const auto res = select_default_gemm_config_reshaped_only_rhs(auto_heuristics::CommonQuery{ gpu_target, DataType::QASYMM8, m, n, k, batch_size });
        lhs_info       = res.lhs_info;
        rhs_info       = res.rhs_info;

        // Validate reshape RHS kernel
        auto_init_if_empty(tmp_b_info, weights_info.clone()->set_tensor_shape(compute_rhs_reshaped_shape(weights_info, rhs_info)));
        ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMReshapeRHSMatrixKernel::validate(&weights_info, &tmp_b_info, rhs_info));
    }

    TensorInfo info_vector_sum_col{};
    TensorInfo info_vector_sum_row{};

    const GEMMLowpReductionKernelInfo reduction_info;
    // Validate matrix B reduction kernel only if a_offset is not equal to 0
    if(a_offset != 0)
    {
        info_vector_sum_col = TensorInfo(compute_reductionA_shape(*b), 1, DataType::S32);

        // Validate Matrix B reduction kernel
        ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMLowpMatrixBReductionKernel::validate(&weights_info, &info_vector_sum_col, reduction_info));
    }

    // Validate Matrix A reduction kernel only if b_offset is not equal to 0
    if(b_offset != 0)
    {
        info_vector_sum_row = TensorInfo(compute_reductionB_shape(*a), 1, DataType::S32);

        // Validate matrix A reduction kernel
        ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMLowpMatrixAReductionKernel::validate(a, &info_vector_sum_row, reduction_info));
    }

    GEMMKernelInfo gemm_kernel_info;
    gemm_kernel_info.m                       = m;
    gemm_kernel_info.n                       = n;
    gemm_kernel_info.k                       = k;
    gemm_kernel_info.depth_output_gemm3d     = depth_output_gemm3d;
    gemm_kernel_info.reinterpret_input_as_3d = reinterpret_input_as_3d;
    gemm_kernel_info.lhs_info                = lhs_info;
    gemm_kernel_info.rhs_info                = rhs_info;
    gemm_kernel_info.a_offset                = a_offset;
    gemm_kernel_info.b_offset                = b_offset;
    if(gemm_info.gemmlowp_output_stage().type != GEMMLowpOutputStageType::NONE)
    {
        const size_t num_filters = (gemm_info.gemmlowp_output_stage().is_quantized_per_channel) ? gemm_info.gemmlowp_output_stage().gemmlowp_multipliers.size() : 1;

        const TensorInfo gemm_output_stage_multipliers_shifts_info(TensorInfo(TensorShape(num_filters), 1, DataType::S32));

        GEMMLowpOutputStageInfo gemmlowp_output_stage = gemm_info.gemmlowp_output_stage();
        gemmlowp_output_stage.output_data_type        = a->data_type();

        gemm_kernel_info.output_stage = gemmlowp_output_stage;
        if(reshape_matrix_b && gemm_info.gemmlowp_output_stage().type == GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT)
        {
            // Validate matrix multiply kernel with fused output stage
            ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel::validate(matrix_a_info, matrix_b_info, output, gemm_kernel_info,
                                                                                                a_offset == 0 ? nullptr : &info_vector_sum_col,
                                                                                                b_offset == 0 ? nullptr : &info_vector_sum_row,
                                                                                                c,
                                                                                                &gemm_output_stage_multipliers_shifts_info,
                                                                                                &gemm_output_stage_multipliers_shifts_info));
        }
        else
        {
            TensorInfo mm_result_s32_info{};

            if(reshape_matrix_b)
            {
                // Output tensor auto initialization if not yet initialized
                auto_init_if_empty(mm_result_s32_info, a->clone()->set_tensor_shape(compute_mm_shape(*matrix_a_info, *matrix_b_info, reshape_info)).set_data_type(DataType::S32));

                // Validate matrix multiply
                ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel::validate(matrix_a_info, matrix_b_info, &mm_result_s32_info, gemm_kernel_info));
            }
            else
            {
                // Output tensor auto initialization if not yet initialized
                auto_init_if_empty(mm_result_s32_info, a->clone()->set_tensor_shape(compute_mm_shape(*matrix_a_info, *matrix_b_info, false, reshape_info)).set_data_type(DataType::S32));

                // Pick up the GEMM configuration
                // NOTE: No need to validate mlgo configurations as they automatically fall back to default heuristics if validation fails
                // It doesn't matter whether the data type is DataType::QASYMM8 or DataType::QASYMM8_SIGNED, since it only affects the shape configuration
                const auto res = select_default_gemm_config_native(auto_heuristics::CommonQuery{ gpu_target, DataType::QASYMM8, m, n, k, batch_size });
                lhs_info       = res.lhs_info;
                rhs_info       = res.rhs_info;

                // Validate matrix multiply
                ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMLowpMatrixMultiplyNativeKernel::validate(matrix_a_info, matrix_b_info, &mm_result_s32_info, lhs_info, rhs_info, reshape_info));
            }

            // Validate offset contribution kernel
            ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMLowpOffsetContributionOutputStageKernel::validate(&mm_result_s32_info,
                                                                                                a_offset == 0 ? nullptr : &info_vector_sum_col,
                                                                                                b_offset == 0 ? nullptr : &info_vector_sum_row,
                                                                                                c,
                                                                                                output,
                                                                                                a_offset, b_offset,
                                                                                                gemmlowp_output_stage,
                                                                                                &gemm_output_stage_multipliers_shifts_info,
                                                                                                &gemm_output_stage_multipliers_shifts_info));
        }
    }
    else
    {
        if(reshape_matrix_b)
        {
            // Validate matrix multiply
            ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel::validate(matrix_a_info, matrix_b_info, output, gemm_kernel_info));
        }
        else
        {
            // Pick up the GEMM configuration
            // It doesn't matter whether the data type is DataType::QASYMM8 or DataType::QASYMM8_SIGNED, since it only affects the shape configuration
            const auto res = select_default_gemm_config_native(auto_heuristics::CommonQuery{ gpu_target, DataType::QASYMM8, m, n, k, batch_size });
            lhs_info       = res.lhs_info;
            rhs_info       = res.rhs_info;

            // Validate matrix multiply
            ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMLowpMatrixMultiplyNativeKernel::validate(matrix_a_info, matrix_b_info, output, lhs_info, rhs_info, reshape_info));
        }

        if(output->total_size() != 0)
        {
            // Validate offset contribution kernel
            ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMLowpOffsetContributionKernel::validate(output,
                                                                                     a_offset == 0 ? nullptr : &info_vector_sum_col,
                                                                                     b_offset == 0 ? nullptr : &info_vector_sum_row,
                                                                                     c,
                                                                                     a_offset, b_offset));
        }
    }

    return Status{};
}
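
// Example (illustrative): validate() can be used as a pre-flight check before paying the
// cost of configure(); the tensors are assumed to be set up as in the configure() sketch above:
//
//   const Status st = CLGEMMLowpMatrixMultiplyCore::validate(a.info(), b.info(), nullptr, dst.info(), GEMMInfo());
//   if(!bool(st))
//   {
//       std::cerr << st.error_description() << std::endl;
//   }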

void CLGEMMLowpMatrixMultiplyCore::run()
{
    prepare();

    MemoryGroupResourceScope scope_mg(_memory_group);

    if(_is_gemm_reshaped)
    {
        if(!_reshape_b_only_on_first_run)
        {
            // Run reshape matrix B
            CLScheduler::get().enqueue(*_mtx_b_reshape_kernel, false);
        }
    }

    // Run matrix B reduction kernel only if _a_offset is not equal to 0
    if(_a_offset != 0 && !_reshape_b_only_on_first_run)
    {
        CLScheduler::get().enqueue(*_mtx_b_reduction_kernel, false);
    }

    // Run matrix A reduction kernel only if _b_offset is not equal to 0
    if(_b_offset != 0)
    {
        CLScheduler::get().enqueue(*_mtx_a_reduction_kernel, false);
    }

    // Run matrix multiply
    if(_is_gemm_reshaped)
    {
        CLScheduler::get().enqueue(*_mm_reshaped_only_rhs_kernel, false);
    }
    else
    {
        CLScheduler::get().enqueue(*_mm_native_kernel, false);
    }
    if(_run_output_stage)
    {
        // Run offset contribution/output stage kernel
        CLScheduler::get().enqueue(*_offset_contribution_output_stage_kernel, true);
    }
    if(_run_offset_contribution)
    {
        // Run offset contribution kernel
        CLScheduler::get().enqueue(*_offset_contribution_kernel, true);
    }
}

void CLGEMMLowpMatrixMultiplyCore::prepare()
{
    if(!_is_prepared)
    {
        if(_convert_to_qasymm8)
        {
            _qasymm8_weights.allocator()->allocate();
            CLScheduler::get().enqueue(*_weights_to_qasymm8, false);
        }

        if(_is_gemm_reshaped && _reshape_b_only_on_first_run)
        {
            ARM_COMPUTE_ERROR_ON(!_original_b->is_used());

            // Run reshape kernel and mark original weights tensor as unused
            _tmp_b.allocator()->allocate();
            CLScheduler::get().enqueue(*_mtx_b_reshape_kernel, false);
            _original_b->mark_as_unused();
        }

        // Run matrix B reduction kernel only if _a_offset is not equal to 0
        if(_a_offset != 0 && _reshape_b_only_on_first_run)
        {
            _vector_sum_col.allocator()->allocate();
            CLScheduler::get().enqueue(*_mtx_b_reduction_kernel, false);
        }

        CLScheduler::get().queue().finish();
        _is_prepared = true;
    }
}
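
// Usage note (illustrative): with reshape_b_only_on_first_run == true the weight
// conversion, the RHS reshape and the matrix B reduction above run a single time and
// their results are kept, so repeated executions only pay for the per-run kernels:
//
//   gemmlowp.prepare();              // optional: run() calls prepare() on first use
//   for(int i = 0; i < num_runs; ++i)
//   {
//       gemmlowp.run();              // the B-side preparation work is not repeated
//   }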
} // namespace arm_compute