Arm Compute Library 21.05 — source listing of CLGEMMMatrixMultiplyKernel.cpp (Doxygen-generated page; navigation links omitted).
1 /*
2  * Copyright (c) 2017-2021 Arm Limited.
3  *
4  * SPDX-License-Identifier: MIT
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a copy
7  * of this software and associated documentation files (the "Software"), to
8  * deal in the Software without restriction, including without limitation the
9  * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10  * sell copies of the Software, and to permit persons to whom the Software is
11  * furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in all
14  * copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22  * SOFTWARE.
23  */
25 
32 #include "arm_compute/core/Utils.h"
35 #include "src/core/CL/CLValidate.h"
39 #include "support/StringSupport.h"
40 
41 #include <set>
42 #include <string>
43 
44 namespace arm_compute
45 {
47 
48 namespace
49 {
50 using ElementsProcessed = Steps;
51 
52 inline Status validate_arguments(const ITensorInfo *input0, const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, float beta,
53  bool is_interleaved_transposed, const GEMMReshapeInfo &reshape_info, bool fp_mixed_precision)
54 {
55  ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input0, input1, output);
59  ARM_COMPUTE_RETURN_ERROR_ON_MSG((fp_mixed_precision && (input0->data_type() != DataType::F16)), "Mixed precision floating point is supported only for F16 data");
60  ARM_COMPUTE_RETURN_ERROR_ON_MSG(input0->num_dimensions() > 4, "The number of dimensions for the matrix A must be <= 4");
61  ARM_COMPUTE_RETURN_ERROR_ON_MSG(input1->num_dimensions() > 3, "The number of dimensions for the matrix B must be <= 3");
62  ARM_COMPUTE_RETURN_ERROR_ON_MSG(is_interleaved_transposed && reshape_info.reinterpret_input_as_3d(), "The input tensor cannot be reinterpreted as 3D if is_interleaved_transposed is true");
63  ARM_COMPUTE_RETURN_ERROR_ON_MSG(input1->num_dimensions() > 2 && reshape_info.reinterpret_input_as_3d(), "The input1 tensor cannot have more than 2 dimensions if input0 has to be reinterpreted as 3D");
64  ARM_COMPUTE_RETURN_ERROR_ON_MSG((reshape_info.reinterpret_input_as_3d() || reshape_info.depth_output_gemm3d() != 0) && (input2 != nullptr)
65  && (!reshape_info.broadcast_bias()),
66  "Bias addition only supported with broadcast mode in case the input or output has to be reinterpreted as 3D");
67 
68  if(!is_interleaved_transposed)
69  {
70  ARM_COMPUTE_RETURN_ERROR_ON(input0->dimension(0) != input1->dimension(1));
71 
72  if(input2 != nullptr && !(helpers::float_ops::is_zero(beta)))
73  {
74  const unsigned int m = reshape_info.reinterpret_input_as_3d() ? input0->dimension(1) * input0->dimension(2) : input0->dimension(1);
75  const unsigned int n = input1->dimension(0);
76  const unsigned int input2_dim0 = input2->dimension(0);
77  const unsigned int input2_dim1 = input2->dimension(1);
78 
80  if(reshape_info.broadcast_bias())
81  {
82  ARM_COMPUTE_RETURN_ERROR_ON_MSG((input2_dim1 != 1 || input2_dim0 != n), "Incorrect dimension of bias matrix which is to be broadcasted");
83  }
84  else
85  {
86  ARM_COMPUTE_RETURN_ERROR_ON_MSG((input2_dim0 != n || input2_dim1 != m), "Incorrect dimension of bias matrix");
87  }
88  }
89  }
90  else
91  {
92  GEMMRHSMatrixInfo rhs_info;
93  GEMMLHSMatrixInfo lhs_info;
94  const auto m = static_cast<unsigned int>(reshape_info.m());
95  const auto n = static_cast<unsigned int>(reshape_info.n());
96  const int k = reshape_info.k();
97  const int mult_transpose1xW_width = reshape_info.mult_transpose1xW_width();
98  const int mult_interleave4x4_height = reshape_info.mult_interleave4x4_height();
99  rhs_info.n0 = max_cl_vector_width / input1->element_size();
100  rhs_info.k0 = 1;
101  rhs_info.h0 = mult_transpose1xW_width;
102  rhs_info.interleave = false;
103  rhs_info.transpose = false;
104  lhs_info.m0 = 4;
105  lhs_info.k0 = 4;
106  lhs_info.v0 = mult_interleave4x4_height;
107  lhs_info.interleave = true;
108  lhs_info.transpose = true;
109 
110  TensorShape tensor_shape0{ input0->tensor_shape() };
111  tensor_shape0.set(0, k);
112  tensor_shape0.set(1, m);
113 
114  TensorShape tensor_shape1{ input1->tensor_shape() };
115  tensor_shape1.set(0, n);
116  tensor_shape1.set(1, k);
117 
118  const TensorInfo tensor_info0 = input0->clone()->set_tensor_shape(tensor_shape0);
119  const TensorInfo tensor_info1 = input1->clone()->set_tensor_shape(tensor_shape1);
120 
121  const TensorInfo tensor_info_reshaped0 = input0->clone()->set_tensor_shape(compute_lhs_reshaped_shape(tensor_info0, lhs_info));
122  const TensorInfo tensor_info_reshaped1 = input1->clone()->set_tensor_shape(compute_rhs_reshaped_shape(tensor_info1, rhs_info));
123 
124  ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input0, &tensor_info_reshaped0);
125  ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input1, &tensor_info_reshaped1);
126 
127  if(input2 != nullptr && !(helpers::float_ops::is_zero(beta)))
128  {
129  const unsigned int input2_dim0 = input2->dimension(0);
130  const unsigned int input2_dim1 = input2->dimension(1);
131 
133  if(reshape_info.broadcast_bias())
134  {
135  ARM_COMPUTE_RETURN_ERROR_ON_MSG((input2_dim1 != 1 || input2_dim0 != n), "Incorrect dimension of bias matrix which is to be broadcasted");
136  }
137  else
138  {
139  ARM_COMPUTE_RETURN_ERROR_ON_MSG((input2_dim0 != n || input2_dim1 != m), "Incorrect dimension of bias matrix");
140  }
141  }
142  }
143 
144  if(output->total_size() != 0)
145  {
146  const TensorInfo tensor_info_output = output->clone()->set_tensor_shape(compute_mm_shape(*input0, *input1, is_interleaved_transposed, reshape_info));
147  ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(output, &tensor_info_output);
149  }
150 
151  return Status{};
152 }
153 
154 inline std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input0, ITensorInfo *input1, ITensorInfo *input2, ITensorInfo *output,
155  float beta, bool is_interleaved_transposed, const GEMMReshapeInfo &reshape_info, GPUTarget gpu_target,
156  ElementsProcessed &num_elements_processed)
157 {
158  ARM_COMPUTE_UNUSED(beta);
159  bool window_changed = false;
160  Window win{};
161  Window win_out{};
162 
163  const DataType data_type = input0->data_type();
164  unsigned int &num_elems_processed_per_iteration_x = num_elements_processed[0];
165  unsigned int &num_elems_processed_per_iteration_y = num_elements_processed[1];
166  bool reinterpret_input_as_3d = reshape_info.reinterpret_input_as_3d();
167  bool reinterpret_output_as_3d = (reshape_info.depth_output_gemm3d() != 0);
168 
169  // In case both input and output have to be reinterpreted as 3D tensors,
170  // force reinterpret_input_as_3d and reinterpret_output_as_3d to be false.
171  if(reinterpret_input_as_3d == reinterpret_output_as_3d)
172  {
173  reinterpret_input_as_3d = false;
174  reinterpret_output_as_3d = false;
175  }
176 
177  // Output tensor auto inizialitation if not yet initialized
178  auto_init_if_empty(*output, input0->clone()->set_tensor_shape(compute_mm_shape(*input0, *input1, is_interleaved_transposed, reshape_info)));
179 
180  TensorInfo tmp_info(*output);
181 
182  if(reinterpret_output_as_3d)
183  {
184  // Since the output tensor has to be reinterpreted as 3D and the execute window is based on a 2D GEMM,
185  // the window needs to be constructed on the 2D collapsed version of the tensor
186  TensorShape tmp_shape(output->tensor_shape());
187  tmp_shape.collapse(2U, 1U);
188  tmp_info.set_tensor_shape(tmp_shape);
189  }
190 
191  if(is_interleaved_transposed)
192  {
193  // reinterpret_input_as_3d is not supported if is_interleaved_transposed is set
194  ARM_COMPUTE_ERROR_ON(reshape_info.reinterpret_input_as_3d());
195 
196  // Configure kernel window
197  num_elems_processed_per_iteration_x = max_cl_vector_width / data_size_from_type(data_type);
198  num_elems_processed_per_iteration_y = 4;
199 
200  win = calculate_max_window(tmp_info, Steps(num_elems_processed_per_iteration_x, num_elems_processed_per_iteration_y));
201  if(input2 != nullptr)
202  {
203  const int bias_processed_per_iteration_x = num_elems_processed_per_iteration_x;
204 
205  const int bias_processed_per_iteration_y = reshape_info.broadcast_bias() ? 1 : num_elems_processed_per_iteration_y;
206 
207  AccessWindowStatic input2_access(input2, 0, 0,
208  ceil_to_multiple(input2->dimension(0), bias_processed_per_iteration_x),
209  ceil_to_multiple(input2->dimension(1), bias_processed_per_iteration_y));
210 
211  window_changed = update_window_and_padding(win, input2_access); // window used by the execute_window_loop
212  }
213  }
214  else // The input tensors have not been reshaped
215  {
216  // Special case for 1xN, 2xN, 3xN and 4xN input0 tensor. num_elems_processed_per_iteration_x is set up for the default case.
217  num_elems_processed_per_iteration_x = max_cl_vector_width / data_size_from_type(data_type);
218  num_elems_processed_per_iteration_y = std::min(static_cast<int>(output->dimension(1)), 4);
219 
220  // Create kernels according to the architecture, data type and input size.
221  GPUTarget arch_target = get_arch_from_target(gpu_target);
222  if(arch_target == GPUTarget::BIFROST && data_type == DataType::F32)
223  {
224  num_elems_processed_per_iteration_x = (input1->dimension(0) <= 1000 && input0->num_dimensions() == 1) ? 2 : 4;
225  }
226 
227  // Configure window
228  win = calculate_max_window(tmp_info, Steps(num_elems_processed_per_iteration_x, num_elems_processed_per_iteration_y));
229  win_out = calculate_max_window(*output, Steps(num_elems_processed_per_iteration_x, num_elems_processed_per_iteration_y));
230  AccessWindowStatic input0_access(input0, 0, 0, input0->dimension(0), input0->dimension(1));
231  AccessWindowStatic input1_access(input1, 0, 0, ceil_to_multiple(input1->dimension(0), num_elems_processed_per_iteration_x), input1->dimension(1));
232  AccessWindowStatic output_access(output, 0, 0,
233  output->dimension(0),
234  output->dimension(1));
235 
236  if(input2 != nullptr)
237  {
238  const int bias_processed_per_iteration_x = num_elems_processed_per_iteration_x;
239 
240  AccessWindowStatic input2_access(input2, 0, 0,
241  ceil_to_multiple(input2->dimension(0), bias_processed_per_iteration_x),
242  input2->dimension(1));
243 
244  window_changed = update_window_and_padding(win, input0_access, input1_access, input2_access) || // window used by the execute_window_loop
245  update_window_and_padding(win_out, output_access); // window used to update the padding requirements of output tensor
246  }
247  else
248  {
249  window_changed = update_window_and_padding(win, input0_access, input1_access) || // window used by the execute_window_loop
250  update_window_and_padding(win_out, output_access); // window used to update the padding requirements of output tensor
251  }
252  }
253 
254  // Collapse along the Z direction
255  // This collapse needs to be here in order to tune the Z dimension of LWS
256  Window collapsed = win;
257  const unsigned int dimension_to_collapse = std::min(static_cast<unsigned int>(output->num_dimensions()), 2u);
258  collapsed = win.collapse(win, dimension_to_collapse);
259 
260  Status err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{};
261  return std::make_pair(err, collapsed);
262 }
263 } // namespace
264 
266  : _input0(nullptr), _input1(nullptr), _input2(nullptr), _output(nullptr), _slide_matrix_b(true), _reinterpret_input_as_3d(false), _reinterpret_output_as_3d(false), _add_bias(false),
267  _broadcast_bias(false)
268 {
269 }
270 
271 void CLGEMMMatrixMultiplyKernel::configure(const ICLTensor *input0, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, float alpha, float beta,
272  bool is_interleaved_transposed, const GEMMReshapeInfo &reshape_info, bool fp_mixed_precision, const ActivationLayerInfo &activation_info)
273 {
274  configure(CLKernelLibrary::get().get_compile_context(), input0, input1, input2, output, alpha, beta, is_interleaved_transposed, reshape_info, fp_mixed_precision, activation_info);
275 }
276 
277 void CLGEMMMatrixMultiplyKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input0, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, float alpha,
278  float beta,
279  bool is_interleaved_transposed, const GEMMReshapeInfo &reshape_info, bool fp_mixed_precision, const ActivationLayerInfo &activation_info)
280 {
281  ARM_COMPUTE_ERROR_ON_NULLPTR(input0, input1, output);
282 
283  // Perform validate step
284  ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input0->info(), input1->info(), (input2 != nullptr) ? input2->info() : nullptr, output->info(), beta,
285  is_interleaved_transposed, reshape_info, fp_mixed_precision));
286 
287  auto padding_info = is_interleaved_transposed ? get_padding_info({ input0, input1, output }) : get_padding_info({ input0, output });
288 
289  _input0 = input0;
290  _input1 = input1;
291  _input2 = helpers::float_ops::is_zero(beta) ? nullptr : input2;
292  _output = output;
293  _reinterpret_input_as_3d = reshape_info.reinterpret_input_as_3d();
294  _reinterpret_output_as_3d = (reshape_info.depth_output_gemm3d() != 0);
295  _add_bias = _input2 != nullptr;
296  _broadcast_bias = reshape_info.broadcast_bias();
297 
298  // In case both input and output have to be reinterpreted as 3D tensors,
299  // force reinterpret_input_as_3d and reinterpret_output_as_3d to be false.
301  {
302  _reinterpret_input_as_3d = false;
304  }
305 
306  // Check if we need to slide the matrix B
307  const unsigned int num_dimensions_input0 = _reinterpret_input_as_3d ? _input0->info()->num_dimensions() - 1 : _input0->info()->num_dimensions();
308 
309  _slide_matrix_b = (_input1->info()->num_dimensions() >= num_dimensions_input0);
310 
311  const DataType data_type = input0->info()->data_type();
312 
313  // Get target architecture
314  GPUTarget gpu_target = get_target();
315 
316  ElementsProcessed num_elements_processed{};
317 
318  // Configure kernel window
319  auto win_config = validate_and_configure_window(input0->info(), input1->info(), (input2 != nullptr) ? input2->info() : nullptr, output->info(), beta, is_interleaved_transposed, reshape_info,
320  gpu_target, num_elements_processed);
321  ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
322  ICLKernel::configure_internal(win_config.second);
323 
324  // If _reinterpret_input_as_3d = _reinterpret_output_as_3d = true, both will be turned off (false)
325  // in which case we will dispatch a batched-GEMM to reduce the complexity of the address calculation within the OpenCL kernel.
326  // This means that the actual m used by the kernel is given by output->info()->dimension(1)
327  const unsigned int internal_m = _reinterpret_output_as_3d ? output->info()->dimension(1) * output->info()->dimension(2) : output->info()->dimension(1);
328  const unsigned int n = output->info()->dimension(0);
329 
330  const unsigned int h_gemm_3d = _reinterpret_output_as_3d ? output->info()->dimension(1) : input0->info()->dimension(1);
331  const unsigned int d_gemm_3d = _reinterpret_output_as_3d ? output->info()->dimension(2) : input0->info()->dimension(2);
332 
333  const unsigned int m0 = num_elements_processed.y();
334  const unsigned int n0 = num_elements_processed.x();
335 
336  // Calculate partial (store instead of load) M0 and partial N0 for the partial blocks at the end of a row/column if any. This is to avoid padding.
337  const unsigned int partial_store_m0 = internal_m % m0;
338  const unsigned int partial_store_n0 = n % n0;
339 
340  // Create build options
341  CLBuildOptions build_opts;
342 
343  build_opts.add_option_if(!(helpers::float_ops::is_one(alpha)), "-DALPHA=" + float_to_string_with_full_precision(alpha));
344  build_opts.add_option_if(_input2 != nullptr, "-DBETA=" + float_to_string_with_full_precision(beta));
345  build_opts.add_option_if(helpers::float_ops::is_one(beta), "-DUNIT_BETA");
346  build_opts.add_option_if(reshape_info.broadcast_bias(), "-DBROADCAST_BIAS");
347  build_opts.add_option_if(_reinterpret_input_as_3d, "-DREINTERPRET_INPUT_AS_3D");
348  build_opts.add_option_if(_reinterpret_output_as_3d, "-DREINTERPRET_OUTPUT_AS_3D");
349  build_opts.add_option_if(_reinterpret_input_as_3d || _reinterpret_output_as_3d, "-DHEIGHT_GEMM3D=" + support::cpp11::to_string(h_gemm_3d));
350  build_opts.add_option_if(_reinterpret_input_as_3d || _reinterpret_output_as_3d, "-DDEPTH_GEMM3D=" + support::cpp11::to_string(d_gemm_3d));
351  build_opts.add_option_if(!_slide_matrix_b, "-DMATRIX_B_DEPTH=" + support::cpp11::to_string(input1->info()->dimension(2)));
352  build_opts.add_option_if(activation_info.enabled(), "-DACTIVATION_TYPE=" + lower_string(string_from_activation_func(activation_info.activation())));
353  build_opts.add_option_if(activation_info.enabled(), "-DA_VAL=" + float_to_string_with_full_precision(activation_info.a()));
354  build_opts.add_option_if(activation_info.enabled(), "-DB_VAL=" + float_to_string_with_full_precision(activation_info.b()));
355  build_opts.add_option("-DIN1_DIM_X=" + support::cpp11::to_string(input1->info()->dimension(0)));
356 
357  const bool is_bifrost = get_arch_from_target(gpu_target) == GPUTarget::BIFROST;
358 
359  std::string kernel_name;
360  if(is_interleaved_transposed)
361  {
362  const int mult_transpose1xW_width = reshape_info.mult_transpose1xW_width();
363  const int mult_interleave4x4_height = reshape_info.mult_interleave4x4_height();
364 
365  build_opts.add_option("-DM=" + support::cpp11::to_string(internal_m));
366  build_opts.add_option("-DN=" + support::cpp11::to_string(n));
367  build_opts.add_option("-DK=" + support::cpp11::to_string(input1->info()->dimension(0) / (n0 * mult_transpose1xW_width)));
368  build_opts.add_option("-DH0=" + support::cpp11::to_string(mult_transpose1xW_width));
369  build_opts.add_option("-DV0=" + support::cpp11::to_string(mult_interleave4x4_height));
370  build_opts.add_option("-DPARTIAL_STORE_M0=" + support::cpp11::to_string(partial_store_m0));
371  build_opts.add_option("-DPARTIAL_STORE_N0=" + support::cpp11::to_string(partial_store_n0));
372 
373  if(is_data_type_float(data_type) && is_bifrost)
374  {
375  kernel_name = "gemm_mm_interleaved_transposed_" + lower_string(string_from_data_type(data_type)) + "_bifrost";
376  }
377  else
378  {
379  kernel_name = "gemm_mm_interleaved_transposed_" + lower_string(string_from_data_type(data_type));
380  if(fp_mixed_precision && data_type == DataType::F16)
381  {
382  // currently wider accumulator is only supported for fp16 kernels.
383  kernel_name += "_acc32";
384  }
385  }
386  }
387  else // The input tensors have not been reshaped
388  {
389  build_opts.add_option("-DN=" + support::cpp11::to_string(n));
390  build_opts.add_option("-DK=" + support::cpp11::to_string(input0->info()->dimension(0)));
391  build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(data_type));
392  build_opts.add_option("-DM0=" + support::cpp11::to_string(m0));
393  build_opts.add_option("-DN0=" + support::cpp11::to_string(n0));
394  build_opts.add_option("-DPARTIAL_STORE_M0=" + support::cpp11::to_string(partial_store_m0));
395  build_opts.add_option("-DPARTIAL_STORE_N0=" + support::cpp11::to_string(partial_store_n0));
396 
397  // Create kernels according to the architecture, data type and input size.
398  if(is_data_type_float(data_type) && is_bifrost)
399  {
400  kernel_name = "gemm_mm_floating_point";
401 
402  if(input0->info()->num_dimensions() != 1)
403  {
405  if(fp_mixed_precision && data_type == DataType::F16)
406  {
407  // currently wider accumulator is only supported for fp16 kernels.
408  kernel_name += "_acc32";
409  }
410  }
411  else if(input1->info()->dimension(0) <= 1000 && data_type == DataType::F32)
412  {
413  // The first kernel is optimized for the case of 1000 or less output elements (e.g. FC8 of AlexNet and VGG-16, and
414  // FC1 of Inception v3). The second kernel is optimized for the case of greater than 1000 output elements (e.g.
415  // FC6 and FC7 of AlexNet and VGG-16).
416  kernel_name += "_" + lower_string(string_from_data_type(data_type)) + "_bifrost_1000";
417  }
418 
419  // The work-group size equal to the Bifrost quad size has been proved to be optimal for these kernels
420  // via exhaustive autotuning over a range of representative layer configurations.
421  set_lws_hint(cl::NDRange(4));
422  }
423  else // (MIDGARD and F32) or (F16)
424  {
425  kernel_name = "gemm_mm_floating_point";
426  }
427  }
428  // Create kernel
429  _kernel = create_kernel(compile_context, kernel_name, build_opts.options());
430 
431  // Set config_id for enabling LWS tuning
432  _config_id = "gemm_";
433  _config_id += (is_interleaved_transposed ? "reshaped_" : "");
434  _config_id += (_add_bias ? "add_bias_" : "");
435  _config_id += (_broadcast_bias ? "broadcast_bias_" : "");
436  _config_id += (fp_mixed_precision ? "fp_mixed_" : "");
437  _config_id += (_reinterpret_input_as_3d ? "3di_" : "");
438  _config_id += (_reinterpret_output_as_3d ? "3do_" : "");
439  _config_id += lower_string(string_from_data_type(input0->info()->data_type()));
440  _config_id += "_";
441  _config_id += support::cpp11::to_string(output->info()->dimension(1));
442  _config_id += "_";
443  _config_id += support::cpp11::to_string(output->info()->dimension(0));
444  _config_id += "_";
445  _config_id += support::cpp11::to_string(output->info()->dimension(2));
446  _config_id += "_";
447  _config_id += support::cpp11::to_string(output->info()->dimension(3));
448  _config_id += "_";
449  _config_id += (is_interleaved_transposed ? support::cpp11::to_string(input1->info()->dimension(0)) : support::cpp11::to_string(input1->info()->dimension(1)));
450 
452 }
453 
454 Status CLGEMMMatrixMultiplyKernel::validate(const ITensorInfo *input0, const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, float alpha, float beta,
455  bool is_interleaved_transposed, const GEMMReshapeInfo &reshape_info, GPUTarget gpu_target, bool fp_mixed_precision, const ActivationLayerInfo &activation_info)
456 {
457  // Note: num_elements_processed will be set in validate_and_configure_window()
458  ElementsProcessed num_elements_processed{};
459  ARM_COMPUTE_UNUSED(alpha);
460  ARM_COMPUTE_UNUSED(activation_info);
461  ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input0, input1, input2, output, beta, is_interleaved_transposed, reshape_info, fp_mixed_precision));
462  ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(input0->clone().get(),
463  input1->clone().get(),
464  (input2 != nullptr) ? input2->clone().get() : nullptr,
465  output->clone().get(),
466  beta,
467  is_interleaved_transposed,
468  reshape_info,
469  gpu_target,
470  num_elements_processed)
471  .first);
472 
473  return Status{};
474 }
475 
476 void CLGEMMMatrixMultiplyKernel::run(const Window &window, cl::CommandQueue &queue)
477 {
480 
481  if(_input1->info()->num_dimensions() < 3)
482  {
483  // The stride_z for matrix B must be zero if we do not slice
485  }
486 
488  Window slice_matrix_b = slice;
489 
490  slice_matrix_b.set(Window::DimX, Window::Dimension(0, 1, 1));
491  slice_matrix_b.set(Window::DimY, Window::Dimension(0, 1, 1));
492 
493  const unsigned int num_arguments_bias = _add_bias ? num_arguments_per_2D_tensor() + 1 : 0;
494 
496  {
497  // Pass bottom paddings to the kernel if the input has to be reinterpreted as 3D tensor
498  const unsigned int idx0 = 3 * num_arguments_per_2D_tensor() + 3 + num_arguments_bias;
499  const unsigned int total_cross_plane_pad = _input0->info()->padding().top + _input0->info()->padding().bottom;
500  _kernel.setArg<cl_uint>(idx0, static_cast<unsigned int>(total_cross_plane_pad));
501  }
502 
504  {
505  // Pass bottom paddings to the kernel if the output has to be reinterpreted as 3D tensor
506  const unsigned int idx0 = 3 * num_arguments_per_2D_tensor() + 3 + (_reinterpret_input_as_3d ? 1 : 0) + num_arguments_bias;
507  const unsigned int total_cross_plane_pad = _output->info()->padding().top + _output->info()->padding().bottom;
508  _kernel.setArg<cl_uint>(idx0, static_cast<unsigned int>(total_cross_plane_pad));
509  }
510 
511  do
512  {
513  Window slice_b = slice;
514  // Don't slice matrix B along the z dimension if matrix B has just 2 dimensions and matrix A more than 2
515  // This scenario can happen when the matrix multiplication is used to perform a convolution operation
516  if(!_slide_matrix_b)
517  {
518  slice_b = slice_matrix_b;
519  }
520 
521  unsigned int idx = 0;
523  add_2D_tensor_argument(idx, _input1, slice_b);
524  if(_add_bias)
525  {
527  }
529  _kernel.setArg<cl_uint>(idx++, static_cast<unsigned int>(_input0->info()->strides_in_bytes()[2]));
530  _kernel.setArg<cl_uint>(idx++, static_cast<unsigned int>(_input1->info()->strides_in_bytes()[2]));
531  if(_add_bias)
532  {
533  _kernel.setArg<cl_uint>(idx++, static_cast<unsigned int>(_input2->info()->strides_in_bytes()[2]));
534  }
535  _kernel.setArg<cl_uint>(idx++, static_cast<unsigned int>(_output->info()->strides_in_bytes()[2]));
536  enqueue(queue, *this, slice, lws_hint());
537  }
539 }
540 } // namespace arm_compute
unsigned int top
top of the border
Definition: Types.h:375
virtual size_t num_dimensions() const =0
The number of dimensions of the tensor (rank)
bool is_one(float a, float epsilon=0.00001f)
Checks if the input floating point number is 1.0f checking if the difference is within a range define...
Definition: float_ops.h:97
Window calculate_max_window(const ValidRegion &valid_region, const Steps &steps, bool skip_border, BorderSize border_size)
void run(const Window &window, cl::CommandQueue &queue) override
Enqueue the OpenCL kernel to process the given window on the passed OpenCL command queue.
#define ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(tensor)
Definition: CLValidate.h:35
const Window & window() const
The maximum window the kernel can be executed on.
Definition: IKernel.cpp:28
std::unordered_map< const ITensorInfo *, PaddingSize > get_padding_info(std::initializer_list< const ITensorInfo * > infos)
Stores padding information before configuring a kernel.
Definition: Utils.cpp:489
bool enabled() const
Check if initialised.
Definition: Types.h:1528
virtual size_t dimension(size_t index) const =0
Return the size of the requested dimension.
void enqueue(cl::CommandQueue &queue, ICLKernel &kernel, const Window &window, const cl::NDRange &lws_hint=CLKernelLibrary::get().default_ndrange(), bool use_dummy_work_items=false)
Add the kernel to the command queue with the given window.
Definition: ICLKernel.cpp:32
const StringSet & options() const
Gets the current options list set.
cl::NDRange lws_hint() const
Return the Local-Workgroup-Size hint.
Definition: ICLKernel.h:276
void set_lws_hint(const cl::NDRange &lws_hint)
Set the Local-Workgroup-Size hint.
Definition: ICLKernel.h:266
float a() const
Get the alpha value.
Definition: Types.h:1518
GEMM reshape information class.
Definition: Types.h:1759
#define ARM_COMPUTE_RETURN_ON_ERROR(status)
Checks if a status contains an error and returns it.
Definition: Error.h:204
std::string to_string(T &&value)
Convert integer and float values to string.
virtual DataType data_type() const =0
Data type used for each element of the tensor.
TensorShape compute_mm_shape(const ITensorInfo &input0, const ITensorInfo &input1, bool is_interleaved_transposed, const GEMMReshapeInfo &reshape_info)
Calculate the matrix multiplication output shape of two tensors.
1 channel, 1 F32 per channel
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
Definition: Error.h:466
const std::string & string_from_activation_func(ActivationLayerInfo::ActivationFunction act)
Translates a given activation function to a string.
Definition: Utils.cpp:163
static CLKernelLibrary & get()
Access the KernelLibrary singleton.
Store the tensor's metadata.
Definition: ITensorInfo.h:40
#define ARM_COMPUTE_ERROR_THROW_ON(status)
Definition: Error.h:455
Describe one of the image's dimensions with a start, end and step.
Definition: Window.h:77
unsigned int bottom
bottom of the border
Definition: Types.h:377
Status class.
Definition: Error.h:52
GPUTarget get_arch_from_target(GPUTarget target)
Helper function to get the GPU arch.
Definition: GPUTarget.cpp:189
std::string lower_string(const std::string &val)
Lower a given string.
Definition: Utils.cpp:326
#define ARM_COMPUTE_RETURN_ERROR_ON(cond)
If the condition is true, an error is returned.
Definition: Error.h:296
Activation Layer Information class.
Definition: Types.h:1478
static Status validate(const ITensorInfo *input0, const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, float alpha, float beta, bool is_interleaved_transposed, const GEMMReshapeInfo &reshape_info, GPUTarget gpu_target, bool fp_mixed_precision=false, const ActivationLayerInfo &activation_info=ActivationLayerInfo())
Static function to check if given info will lead to a valid configuration of CLGEMMMatrixMultiplyKern...
Copyright (c) 2017-2021 Arm Limited.
1 channel, 1 F16 per channel
#define ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(...)
Definition: Validate.h:159
void add_option(std::string option)
Adds option to the existing build option list.
const DataType data_type
Definition: Im2Col.cpp:150
void configure(const ICLTensor *input0, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, float alpha, float beta=0.f, bool is_interleaved_transposed=true, const GEMMReshapeInfo &reshape_info=GEMMReshapeInfo(), bool fp_mixed_precision=false, const ActivationLayerInfo &activation_info=ActivationLayerInfo())
Initialise the kernel's input, output and alpha.
cl::Kernel create_kernel(const CLCompileContext &ctx, const std::string &kernel_name, const std::set< std::string > &build_opts=std::set< std::string >())
Creates an opencl kernel using a compile context.
Definition: CLHelpers.cpp:403
const std::string & string_from_data_type(DataType dt)
Convert a data type identity into a string.
Definition: Utils.cpp:135
static constexpr size_t DimX
Alias for dimension 0 also known as X dimension.
Definition: Window.h:43
bool update_window_and_padding(Window &win, Ts &&... patterns)
Update window and padding size for each of the access patterns.
Definition: WindowHelpers.h:46
#define ARM_COMPUTE_UNUSED(...)
To avoid unused variables warnings.
Definition: Error.h:152
TensorShape compute_lhs_reshaped_shape(const ITensorInfo &a, const GEMMLHSMatrixInfo &lhs_info, bool reinterpret_input_as_3d=false)
Calculate the Left Hand Side matrix reshaped shape.
std::string float_to_string_with_full_precision(float val)
Create a string with the float in full precision.
Definition: Utils.h:1061
auto ceil_to_multiple(S value, T divisor) -> decltype(((value+divisor - 1)/divisor) *divisor)
Computes the smallest number larger or equal to value that is a multiple of divisor.
Definition: Utils.h:71
std::string kernel_name
GPUTarget get_target() const
Get the targeted GPU architecture.
Definition: ICLKernel.h:336
std::string get_cl_type_from_data_type(const DataType &dt)
Translates a tensor data type to the appropriate OpenCL type.
Definition: CLHelpers.cpp:37
TensorShape compute_rhs_reshaped_shape(const ITensorInfo &a, const GEMMRHSMatrixInfo &rhs_info)
Calculate the Right Hand Side matrix reshaped shape.
bool auto_init_if_empty(ITensorInfo &info, const TensorShape &shape, int num_channels, DataType data_type, QuantizationInfo quantization_info=QuantizationInfo())
Auto initialize the tensor info (shape, number of channels and data type) if the current assignment i...
virtual std::unique_ptr< T > clone() const =0
Provide a clone of the current object of class T.
virtual ITensorInfo * info() const =0
Interface to be implemented by the child class to return the tensor's metadata.
size_t data_size_from_type(DataType data_type)
The size in bytes of the data type.
Definition: Utils.h:106
void add_option_if(bool cond, std::string option)
Adds option if a given condition is true;.
void set(size_t dimension, const Dimension &dim)
Set the values of a given dimension.
Definition: Window.inl:49
virtual PaddingSize padding() const =0
Padding of tensor.
static constexpr unsigned int num_arguments_per_2D_tensor()
Returns the number of arguments enqueued per 2D tensor object.
Definition: ICLKernel.h:206
bool slide_window_slice_3D(Window &slice) const
Slide the passed 3D window slice.
Definition: Window.h:335
#define ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(k)
Definition: Validate.h:915
bool has_padding_changed(const std::unordered_map< const ITensorInfo *, PaddingSize > &padding_map)
Check if the previously stored padding info has changed after configuring a kernel.
Definition: Utils.cpp:504
CLCompileContext class.
static constexpr size_t DimY
Alias for dimension 1 also known as Y dimension.
Definition: Window.h:45
void add_2D_tensor_argument(unsigned int &idx, const ICLTensor *tensor, const Window &window)
Add the passed 2D tensor's parameters to the object's kernel's arguments starting from the index idx.
Definition: ICLKernel.h:148
Interface for OpenCL tensor.
Definition: ICLTensor.h:42
#define ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(...)
Definition: Validate.h:439
#define ARM_COMPUTE_CREATE_ERROR(error_code, msg)
Creates an error with a given message.
Definition: Error.h:159
GPUTarget
Available GPU Targets.
Definition: GPUTarget.h:34
Manages all the OpenCL kernels compilation and caching, provides accessors for the OpenCL Context.
#define ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(...)
Definition: Validate.h:541
#define ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(t, c,...)
Definition: Validate.h:788
Status validate_arguments(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output, const GEMMLowpOutputStageInfo *output_stage)
Wrapper to configure the Khronos OpenCL C++ header.
bool is_zero(float a, float epsilon=0.00001f)
Checks if the input floating point number is 0.0f checking if the difference is within a range define...
Definition: float_ops.h:109
#define ARM_COMPUTE_RETURN_ERROR_ON_MSG(cond, msg)
If the condition is true, an error is returned.
Definition: Error.h:244
#define ARM_COMPUTE_ERROR_ON_NULLPTR(...)
Definition: Validate.h:157
ActivationFunction activation() const
Get the type of activation function.
Definition: Types.h:1513
float b() const
Get the beta value.
Definition: Types.h:1523
virtual const Strides & strides_in_bytes() const =0
The strides in bytes for accessing each dimension of the tensor.
Window first_slice_window_3D() const
First 3D slice of the window.
Definition: Window.h:291
DataType
Available data types.
Definition: Types.h:77
Describe a multidimensional execution window.
Definition: Window.h:39
bool is_data_type_float(DataType dt)
Check if a given data type is of floating point type.
Definition: Utils.h:947
#define ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(f, s)
Definition: Validate.h:201
SimpleTensor< T > slice(const SimpleTensor< T > &src, Coordinates starts, Coordinates ends)