Compute Library
 22.05
CpuGemmAssemblyDispatch.cpp
1 /*
2  * Copyright (c) 2018-2022 Arm Limited.
3  *
4  * SPDX-License-Identifier: MIT
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a copy
7  * of this software and associated documentation files (the "Software"), to
8  * deal in the Software without restriction, including without limitation the
9  * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10  * sell copies of the Software, and to permit persons to whom the Software is
11  * furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in all
14  * copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22  * SOFTWARE.
23  */
25 
27 #include "src/core/CPP/Validate.h"
33 
34 #include <arm_neon.h>
35 
36 namespace arm_compute
37 {
38 namespace cpu
39 {
40 using namespace arm_compute::experimental;
41 
42 namespace
43 {
44 struct free_delete
45 {
46  void operator()(void *x)
47  {
48  free(x);
49  }
50 };
51 
52 struct Params
53 {
54  unsigned int M;
55  unsigned int N;
56  unsigned int K;
57  unsigned int batches;
58  unsigned int multis;
59  unsigned int sections;
60  bool indirect;
61 };
62 
63 Params extract_parameters(const ITensorInfo *a, const ITensorInfo *b, const ITensorInfo *d, const AsmGemmInfo &info)
64 {
65  ARM_COMPUTE_ERROR_ON_NULLPTR(a, b, d);
66  Params p;
67  p.M = d->tensor_shape().y();
68  p.K = a->tensor_shape().x();
69  p.N = d->tensor_shape().x();
70  p.batches = 1;
71  p.multis = 1;
72  p.sections = 1;
73  p.indirect = false;
74 
75  if(info.method == AsmConvMethod::Conv || info.method == AsmConvMethod::Indirect)
76  {
77  p.indirect = true;
78  p.sections = b->tensor_shape()[2] * b->tensor_shape()[3];
79  }
80  else
81  {
82  p.multis = b->tensor_shape().z();
83  p.batches = d->tensor_shape().total_size_upper(2) / p.multis;
84  }
85 
86  // Update M in case of GEMM3D for output
87  if(info.depth_output_gemm3d != 0)
88  {
89  p.M = d->tensor_shape().y() * d->tensor_shape().z();
90  p.batches = d->tensor_shape().total_size_upper(3) / p.multis;
91  }
92 
93  return p;
94 }
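// Worked example (illustrative shapes, not taken from the library): for a plain GEMM, i.e. no
// convolution and no GEMM3D, with shapes in arm_compute (x, y, z) order a = [64, 32, 4],
// b = [128, 64, 1] and d = [128, 32, 4], extract_parameters() returns M = 32, N = 128, K = 64,
// batches = 4, multis = 1, sections = 1 and indirect = false.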
95 
96 IScheduler::Hints scheduling_hint_heuristic(arm_gemm::GemmMethod method, DataType data_type)
97 {
98  // Schedule assembly kernel
99  const int granule_threshold = 200;
100  IScheduler::Hints scheduling_hint = IScheduler::Hints(Window::DimX);
101  if(method == arm_gemm::GemmMethod::GEMM_INTERLEAVED && data_type == DataType::F32)
102  {
103  scheduling_hint = IScheduler::Hints(Window::DimX, IScheduler::StrategyHint::DYNAMIC, granule_threshold);
104  }
105  else if(method == arm_gemm::GemmMethod::GEMM_INTERLEAVED_2D && (data_type == DataType::F32 || data_type == DataType::F16 || data_type == DataType::U8 || data_type == DataType::S8))
106  {
107  // GEMM_INTERLEAVED_2D supports 2D parallelism; IScheduler::split_dimensions_all signals the scheduler to parallelise over all window dimensions
108  scheduling_hint = IScheduler::Hints(IScheduler::split_dimensions_all, IScheduler::StrategyHint::STATIC, granule_threshold);
109  }
110  else if(method == arm_gemm::GemmMethod::QUANTIZE_WRAPPER_2D && (data_type == DataType::QASYMM8 || data_type == DataType::QASYMM8_SIGNED))
111  {
112  //special case for QASYMM8 to support 2D parallelism, scheduler here may be tweaked differently compared to FP32 case
113  scheduling_hint = IScheduler::Hints(IScheduler::split_dimensions_all, IScheduler::StrategyHint::STATIC, granule_threshold);
114  }
115 
116  return scheduling_hint;
117 }
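// Usage sketch (hypothetical call site; the local names are illustrative): the hint returned here
// is meant to be handed to the scheduler together with the assembly wrapper kernel, e.g.
//   const auto hint = scheduling_hint_heuristic(arm_gemm::GemmMethod::GEMM_INTERLEAVED, DataType::F32);
//   NEScheduler::get().schedule(optimised_kernel.get(), hint);
// which is how Fallback::run() consumes it further down in this file.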
118 
119 /** Fallback in case ACL doesn't have a function */
120 template <typename TypeInput, typename TypeOutput, class OutputStage = arm_gemm::Nothing>
121 class Fallback : public CpuGemmAssemblyDispatch::IFallback
122 {
123 public:
124  /** Destructor */
125  ~Fallback() = default;
126 
127  /** Initialise the function's input and output.
128  *
129  * @param[in] a Input tensor containing the Matrix A.
130  * @param[in] b Input tensor containing the Matrix B.
131  * @param[in] c Input tensor containing the Matrix C.
132  * @param[out] d Output tensor to store the result of matrix multiplication.
133  * @param[in] args Matrix multiplication information.
134  * @param[in] gemm_info GEMM meta-data
135  * @param[in] os Output stage meta-data.
136  */
137  void configure(const ITensorInfo *a, const ITensorInfo *b, const ITensorInfo *c, ITensorInfo *d,
138  arm_gemm::GemmArgs args, const AsmGemmInfo &gemm_info,
139  const OutputStage &os = {});
140 
141  /** Set requantization data to be used
142  *
143  * @param[in] shifts      Requantization shifts
144  * @param[in] multipliers Requantization multipliers
145  *
146  * @return A tuple with a flag indicating whether left shifts are required, followed by pointers to the left-shift, right-shift and multiplier data respectively
147  */
155  std::tuple<bool, const int32_t *, const int32_t *, const int32_t *> set_requantize_data(const std::vector<int32_t> &shifts,
156  const std::vector<int32_t> &multipliers);
157 
158  // Inherited methods overridden:
159  void run(ITensorPack &tensors) override;
160  void prepare(ITensorPack &tensors) override;
161  bool is_configured() const override;
162  experimental::MemoryRequirements workspace() const override;
163 
164 private:
165  enum AuxTensorIdx
166  {
167  AsmGemmWorkspace = 0,
168  Pretranspose,
169  Count
170  };
171 
172  /** Configure the indirect buffer
173  *
174  * @param[in] a Input tensor containing the Matrix A.
175  * @param[in] b Input tensor containing the Matrix B.
176  * @param[out] d Output tensor to store the result of matrix multiplication.
177  * @param[in] info GEMM meta-data
178  */
179  void configure_indirect(const ITensorInfo *a, const ITensorInfo *b, const ITensorInfo *d, const AsmGemmInfo &info);
180  /** Prepare the indirect buffer */
181  void prepare_indirect_buffer(ITensorPack &tensors);
182 
183  /** Assembly Gemm kernel */
184  std::shared_ptr<arm_gemm::GemmCommon<TypeInput, TypeOutput>> _gemm_kernel_asm{ nullptr };
185  /** Optimised Arm® Neon™ kernel */
186  std::unique_ptr<INEKernel> _optimised_kernel{ nullptr };
187  /** Assembly GEMM workspace tensor info */
188  TensorInfo _workspace_info{};
189  /** Pre-transpose tensor info */
190  TensorInfo _pretranspose_info{};
191  /** Prepared flag */
192  bool _is_prepared{ false };
193  /** GEMM meta-data */
194  AsmGemmInfo _gemm_info{};
195  /** GEMM kernel description */
196  arm_gemm::KernelDescription _kernel_info{};
197  /** Per channel quantization shifts */
198  std::vector<int32_t> _shifts{};
199  std::vector<int32_t> right_shifts{};
200  std::vector<int32_t> left_shifts{};
201  /** Per channel quantization multipliers */
202  std::vector<int32_t> _multipliers{};
203  /** Indirect buffer */
204  std::unique_ptr<const TypeInput *const *, free_delete> _indirect_arg{};
205  std::unique_ptr<const TypeInput *, free_delete> _indirect_buf{};
206  std::vector<TypeInput> _indirect_pad{};
207  arm_gemm::ConvolutionParameters _cp{};
208  experimental::MemoryRequirements _aux_mem{ Count };
209  bool _B_pretranspose_required{ false };
210  bool _is_b_constant{ true };
211  bool _is_c_constant{ true };
212 };
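// Lifecycle of the Fallback wrapper declared above: configure() instantiates the arm_gemm kernel
// and records the workspace/pretranspose memory requirements, prepare() performs the one-off work
// (quantized bias pointer, B pretranspose, indirect buffer), and run() binds the per-call array
// pointers via set_arrays() before handing the wrapped kernel to the scheduler.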
213 
214 template <typename TypeInput, typename TypeOutput, class OutputStage>
215 std::tuple<bool, const int32_t *, const int32_t *, const int32_t *>
216 Fallback<TypeInput, TypeOutput, OutputStage>::set_requantize_data(const std::vector<int32_t> &shifts, const std::vector<int32_t> &multipliers)
217 {
218  _multipliers = multipliers;
219  _shifts = shifts;
220  bool need_left = false;
221  for(const auto s : _shifts)
222  {
223  left_shifts.push_back(std::max(-s, int32_t(0)));
224  right_shifts.push_back(std::min(-s, int32_t(0)));
225  if(s < 0 && !need_left)
226  {
227  need_left = true;
228  }
229  }
230  return std::make_tuple(need_left, left_shifts.data(), right_shifts.data(), _multipliers.data());
231 }
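// Worked example (illustrative values): for shifts = {2, -1, 3} the loop above yields
// left_shifts = {0, 1, 0} and right_shifts = {-2, 0, -3}; need_left becomes true because one shift
// is negative, so the returned tuple is {true, left_shifts.data(), right_shifts.data(), _multipliers.data()}.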
232 
233 template <typename TypeInput, typename TypeOutput, class OutputStage>
234 void Fallback<TypeInput, TypeOutput, OutputStage>::prepare_indirect_buffer(ITensorPack &tensors)
235 {
236  auto a = tensors.get_const_tensor(TensorType::ACL_SRC_0);
237  const TypeInput *A_ptr = reinterpret_cast<TypeInput *>(a->buffer());
238  const int multis = 1;
239  const int batches = a->info()->tensor_shape().total_size_upper(3);
240  const size_t stride_A = a->info()->strides_in_bytes().y() / sizeof(TypeInput);
241  const size_t batch_stride_A = a->info()->strides_in_bytes()[3] / sizeof(TypeInput);
242  const size_t multi_stride_A = a->info()->strides_in_bytes()[4] / sizeof(TypeInput);
243 
244  const size_t output_hw = _cp.output_height * _cp.output_width;
245  const int batch_size = _cp.kernel_height * _cp.kernel_width * output_hw * sizeof(TypeInput);
246  const size_t batch_stride = batch_size / sizeof(TypeInput);
247  const int multi_size = batch_size * batches;
248  const size_t multi_stride = multi_size / sizeof(TypeInput);
249 
250  for(int64_t m = 0; m < multis; m++)
251  {
252  for(int64_t b = 0; b < batches; b++)
253  {
254  for(int64_t output_y = 0; output_y < _cp.output_height; output_y++)
255  {
256  for(int64_t output_x = 0; output_x < _cp.output_width; output_x++)
257  {
258  int64_t output_xy = (output_y * _cp.output_width) + output_x;
259 
260  for(int64_t kernel_y = 0; kernel_y < _cp.kernel_height; kernel_y++)
261  {
262  for(int64_t kernel_x = 0; kernel_x < _cp.kernel_width; kernel_x++)
263  {
264  int64_t input_x = (output_x * _cp.output_stride_w) + kernel_x - _cp.padding_left;
265  int64_t input_y = (output_y * _cp.output_stride_h) + kernel_y - _cp.padding_top;
266  int64_t kernel_xy = (kernel_y * _cp.kernel_width) + kernel_x;
267  int64_t input_xy = (input_y * _cp.input_width) + input_x;
268 
269  if(input_x < 0 || input_x >= _cp.input_width || input_y < 0 || input_y >= _cp.input_height)
270  {
271  _indirect_buf.get()[m * multi_stride + b * batch_stride + kernel_xy * output_hw + output_xy] = _indirect_pad.data();
272  }
273  else
274  {
275  _indirect_buf.get()[m * multi_stride + b * batch_stride + kernel_xy * output_hw + output_xy] =
276  A_ptr + (m * multi_stride_A + b * batch_stride_A + input_xy * stride_A);
277  }
278  }
279  }
280  }
281  }
282  }
283  }
284 }
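// Worked example (illustrative convolution parameters): with output_stride_w = 2, padding_left = 1
// and kernel_x = 0, output_x = 0 gives input_x = 0 * 2 + 0 - 1 = -1, so that entry of the indirect
// buffer points at _indirect_pad (a row filled with the zero-point value); output_x = 1 gives
// input_x = 1, i.e. a pointer into A offset by (input_y * input_width + 1) * stride_A elements.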
285 
286 template <typename TypeInput, typename TypeOutput, class OutputStage>
287 void Fallback<TypeInput, TypeOutput, OutputStage>::configure_indirect(const ITensorInfo *a, const ITensorInfo *b, const ITensorInfo *d, const AsmGemmInfo &info)
288 {
289  ARM_COMPUTE_ERROR_ON(!(info.method == AsmConvMethod::Conv || info.method == AsmConvMethod::Indirect));
290 
291  float zeropad = 0.f;
292  if(is_data_type_quantized(a->data_type()))
293  {
294  zeropad = a->quantization_info().uniform().offset;
295  }
296 
297  const int64_t input_width = static_cast<int64_t>(a->tensor_shape()[1]);
298  const int64_t input_height = static_cast<int64_t>(a->tensor_shape()[2]);
299  const int64_t input_channels = static_cast<int64_t>(a->tensor_shape()[0]);
300  const int64_t kernel_width = static_cast<int64_t>(b->tensor_shape()[2]);
301  const int64_t kernel_height = static_cast<int64_t>(b->tensor_shape()[3]);
302  const int64_t output_width = static_cast<int64_t>(d->tensor_shape()[1]);
303  const int64_t output_height = static_cast<int64_t>(d->tensor_shape()[2]);
304 
305  _cp = { input_width, input_height, input_channels, kernel_width, kernel_height, output_width, output_height,
306  info.ps_info.stride().first, info.ps_info.stride().second, info.padding_top, info.padding_left, zeropad
307  };
308 
309  if(info.method == AsmConvMethod::Conv)
310  {
311  _gemm_kernel_asm->set_convolution_parameters(_cp);
312  }
313 
314  if(info.method == AsmConvMethod::Indirect)
315  {
316  const unsigned int multis = 1;
317  const unsigned int batches = a->tensor_shape().total_size_upper(3);
318  const unsigned int kernel_hw = _cp.kernel_width * _cp.kernel_height;
319  const unsigned int output_hw = _cp.output_width * _cp.output_height;
320 
321  using TypeInputPtr = TypeInput *;
322  const int batch_size = kernel_hw * output_hw * sizeof(TypeInputPtr);
323  const size_t batch_stride = batch_size / sizeof(TypeInputPtr);
324  const int multi_size = batch_size * batches;
325  const size_t multi_stride = multi_size / sizeof(TypeInputPtr);
326 
327  _indirect_buf = std::unique_ptr<const TypeInput *, free_delete>(reinterpret_cast<const TypeInput **>(malloc(multi_size * multis)));
328  _indirect_arg = std::unique_ptr<const TypeInput *const *, free_delete>(reinterpret_cast<const TypeInput *const **>(malloc(sizeof(TypeInput **) * kernel_hw * multis * batches)));
329  _indirect_pad = std::vector<TypeInput>(_cp.input_channels, TypeInput(zeropad));
330 
331  // Set indirect argument
332  int64_t pos = 0;
333  for(int64_t m = 0; m < multis; m++)
334  {
335  for(int64_t b = 0; b < batches; b++)
336  {
337  for(int64_t kernel_xy = 0; kernel_xy < kernel_hw; kernel_xy++)
338  {
339  (_indirect_arg.get())[pos++] = _indirect_buf.get() + m * multi_stride + b * batch_stride + kernel_xy * output_hw;
340  }
341  }
342  }
343 
344  _gemm_kernel_asm->set_indirect_parameters(a->tensor_shape()[0], _indirect_arg.get());
345  }
346 }
347 
348 template <typename TypeInput, typename TypeOutput, class OutputStage>
349 void Fallback<TypeInput, TypeOutput, OutputStage>::configure(const ITensorInfo *a, const ITensorInfo *b, const ITensorInfo *c, ITensorInfo *d,
350  arm_gemm::GemmArgs args, const AsmGemmInfo &gemm_info,
351  const OutputStage &os)
352 {
354 
355  _is_b_constant = b->are_values_constant();
356  _is_c_constant = c ? c->are_values_constant() : true;
357 
358  _gemm_kernel_asm = arm_gemm::gemm<TypeInput, TypeOutput, OutputStage>(args, os);
359  if(_gemm_kernel_asm == nullptr)
360  {
361  //configuration not supported: Leave function unconfigured:
362  return;
363  }
364 
365  arm_gemm::GemmConfig gemm_cfg = _gemm_kernel_asm->get_config();
366 
367  // arm_compute wrapper for the Gemm object (see above)
368  auto acl_gemm_wrapper = std::make_unique<kernel::CpuGemmAssemblyWrapperKernel<TypeInput, TypeOutput>>();
369  ARM_COMPUTE_ERROR_ON(acl_gemm_wrapper == nullptr);
370  acl_gemm_wrapper->configure(_gemm_kernel_asm.get(), gemm_cfg.filter);
371  const size_t workspace_size = _gemm_kernel_asm->get_working_size();
372  const unsigned int alignment = 4096;
373  _workspace_info = TensorInfo(TensorShape(workspace_size), 1, DataType::U8);
374  _aux_mem[AsmGemmWorkspace] = MemoryInfo(offset_int_vec(AsmGemmWorkspace), MemoryLifetime::Temporary, workspace_size, alignment);
375 
376  // If the block below is disabled, ConvLayer deadlocks when threads > 1 and
377  // the shapes are In=1x1x1024 Weights=1x1x1024x1001 Biases=1001 Out=1x1x1001
378  {
379  const unsigned int window_size = _gemm_kernel_asm->get_window_size().total_size();
380  if(window_size < static_cast<unsigned int>(args._maxthreads))
381  {
382  _gemm_kernel_asm->set_nthreads(window_size);
383  }
384  }
385 
386  _optimised_kernel = std::move(acl_gemm_wrapper);
387  _gemm_info = gemm_info;
388  // Check for pre-transposed support
389  if(_gemm_kernel_asm->B_pretranspose_required())
390  {
391  // Forcing 128-byte alignment (required by 32-bit kernels)
392  const unsigned int alignment = 128;
393  const size_t B_pretranspose_size = _gemm_kernel_asm->get_B_pretransposed_array_size();
394  _pretranspose_info = TensorInfo(TensorShape(B_pretranspose_size), 1, DataType::U8);
395  _aux_mem[Pretranspose] = MemoryInfo(offset_int_vec(Pretranspose), MemoryLifetime::Persistent, B_pretranspose_size, alignment);
396  _B_pretranspose_required = true;
397  }
398 
399  // Handle indirect GEMM convolution
400  if(gemm_info.method == AsmConvMethod::Conv || gemm_info.method == AsmConvMethod::Indirect)
401  {
402  configure_indirect(a, b, d, gemm_info);
403  }
404 }
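// Note on the auxiliary buffers described above: configure() only records their size, alignment and
// lifetime; nothing is allocated here. The caller is expected to allocate memory according to
// workspace() and pass it back in the ITensorPack under the same offset_int_vec(AsmGemmWorkspace) /
// offset_int_vec(Pretranspose) ids, which is where CpuAuxTensorHandler looks them up in prepare()
// and run().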
405 
406 template <typename TypeInput, typename TypeOutput, class OutputStage>
407 void Fallback<TypeInput, TypeOutput, OutputStage>::prepare(ITensorPack &tensors)
408 {
409  if(!_is_prepared)
410  {
411  auto b = tensors.get_const_tensor(TensorType::ACL_SRC_1);
412  auto c = tensors.get_const_tensor(TensorType::ACL_SRC_2);
413 
414  // Set up the matrix bias in the assembly kernel; it's just a pointer to matrix C.
415  if(c && c->info()->data_type() == DataType::S32)
416  {
417  _gemm_kernel_asm->set_quantized_bias(reinterpret_cast<const int32_t *>(c->buffer() + c->info()->offset_first_element_in_bytes()), 0);
418  }
419 
420  // Pretranspose B if required
421  if(_gemm_kernel_asm->B_pretranspose_required())
422  {
423  const int ldb = b->info()->strides_in_bytes().y() / sizeof(TypeInput);
424  const auto in1_ptr = reinterpret_cast<const TypeInput *>(b->buffer() + b->info()->offset_first_element_in_bytes());
425  const int multi_stride_b = b->info()->strides_in_bytes().z() / sizeof(TypeInput);
426 
427  CpuAuxTensorHandler pretranspose(offset_int_vec(Pretranspose), _pretranspose_info, tensors, false);
428  ARM_COMPUTE_ERROR_ON(pretranspose.get()->buffer() == nullptr);
429  _gemm_kernel_asm->pretranspose_B_array(pretranspose.get()->buffer(), in1_ptr, ldb, multi_stride_b);
430 
431  b->mark_as_unused();
432  }
433 
434  if(_gemm_info.method == AsmConvMethod::Indirect)
435  {
436  prepare_indirect_buffer(tensors);
437  }
438 
439  _is_prepared = true;
440  }
441 }
442 
443 template <typename TypeInput, typename TypeOutput, class OutputStage>
444 bool Fallback<TypeInput, TypeOutput, OutputStage>::is_configured() const
445 {
446  return _optimised_kernel != nullptr;
447 }
448 
449 template <typename TypeInput, typename TypeOutput, class OutputStage>
450 experimental::MemoryRequirements Fallback<TypeInput, TypeOutput, OutputStage>::workspace() const
451 {
452  return _aux_mem;
453 }
454 
455 template <typename TypeInput, typename TypeOutput, class OutputStage>
456 void Fallback<TypeInput, TypeOutput, OutputStage>::run(ITensorPack &tensors)
457 {
458  auto a = tensors.get_const_tensor(TensorType::ACL_SRC_0);
459  auto b = tensors.get_const_tensor(TensorType::ACL_SRC_1);
460  auto c = tensors.get_const_tensor(TensorType::ACL_SRC_2);
461  auto d = tensors.get_tensor(TensorType::ACL_DST);
462 
463  int lda = a->info()->strides_in_bytes().y() / sizeof(TypeInput);
464  int ldb = 0;
465  const int ldd = d->info()->strides_in_bytes().y() / sizeof(TypeOutput);
466 
467  const size_t a_batch_idx = _gemm_info.reinterpret_input_as_3d != 0 ? 3 : 2;
468  const size_t a_multi_idx = a_batch_idx + 1;
469  const size_t d_batch_idx = _gemm_info.depth_output_gemm3d != 0 ? 3 : 2;
470  const size_t d_multi_idx = d_batch_idx + 1;
471 
472  int batch_stride_a = a->info()->strides_in_bytes()[a_batch_idx] / sizeof(TypeInput);
473  const int batch_stride_d = d->info()->strides_in_bytes()[d_batch_idx] / sizeof(TypeOutput);
474 
475  int multi_stride_a = a->info()->strides_in_bytes()[a_multi_idx] / sizeof(TypeInput);
476  int multi_stride_b = 0;
477  const int multi_stride_d = d->info()->strides_in_bytes()[d_multi_idx] / sizeof(TypeOutput);
478 
479  auto in0_ptr = reinterpret_cast<const TypeInput *>(a->buffer() + a->info()->offset_first_element_in_bytes());
480  const TypeInput *in1_ptr = nullptr;
481  auto out_ptr = reinterpret_cast<TypeOutput *>(d->buffer() + d->info()->offset_first_element_in_bytes());
482 
483  // Check if B is pre-transposed and de-reference it if not
484  if(!_gemm_kernel_asm->B_is_pretransposed())
485  {
486  ldb = b->info()->strides_in_bytes().y() / sizeof(TypeInput);
487  multi_stride_b = b->info()->strides_in_bytes().z() / sizeof(TypeInput);
488  in1_ptr = reinterpret_cast<const TypeInput *>(b->buffer() + b->info()->offset_first_element_in_bytes());
489  }
490 
491  // If either weights or biases are non-constant, the quantized bias and the B pretranspose have to be refreshed on every run
492  if((b && !_is_b_constant) || (c && !_is_c_constant && c->info()->data_type() == DataType::S32))
493  {
494  if(c && c->info()->data_type() == DataType::S32)
495  {
496  _gemm_kernel_asm->set_quantized_bias(reinterpret_cast<const int32_t *>(c->buffer() + c->info()->offset_first_element_in_bytes()), 0);
497  }
498 
499  // Pretranspose B if required
500  if(_B_pretranspose_required)
501  {
502  const int ldb = b->info()->strides_in_bytes().y() / sizeof(TypeInput);
503  const auto b_ptr = reinterpret_cast<const TypeInput *>(b->buffer() + b->info()->offset_first_element_in_bytes());
504  const int multi_stride_b = b->info()->strides_in_bytes().z() / sizeof(TypeInput);
505 
506  CpuAuxTensorHandler pretranspose(offset_int_vec(Pretranspose), _pretranspose_info, tensors, true);
507  ARM_COMPUTE_ERROR_ON(pretranspose.get()->buffer() == nullptr);
508 
509  if(_is_b_constant)
510  {
511  _gemm_kernel_asm->requantize_bias(pretranspose.get()->buffer(), b_ptr, ldb, multi_stride_b);
512  }
513  else
514  {
515  _gemm_kernel_asm->pretranspose_B_array(pretranspose.get()->buffer(), b_ptr, ldb, multi_stride_b);
516  }
517  }
518  }
519 
520  const auto scheduling_hint = scheduling_hint_heuristic(_kernel_info.method, d->info()->data_type());
521 
522  // Set workspace if needed and reset number of threads as buffer manager gets re-created with max_threads
523  CpuAuxTensorHandler workspace(offset_int_vec(AsmGemmWorkspace), _workspace_info, tensors, false);
524  if(workspace.get()->buffer() != nullptr)
525  {
526  _gemm_kernel_asm->set_working_space(reinterpret_cast<void *>(workspace.get()->buffer()));
527  const unsigned int split_dim = scheduling_hint.split_dimension();
528  const unsigned int window_size = _gemm_kernel_asm->get_window_size().total_size();
529  unsigned int num_threads = NEScheduler::get().num_threads();
530  if(window_size < num_threads)
531  {
532  num_threads = window_size;
533  }
534  if(split_dim != IScheduler::split_dimensions_all)
535  {
536  // Make sure the kernel does not expect more threads than we can actually spawn
537  const unsigned int num_iterations = _optimised_kernel.get()->window().num_iterations(split_dim);
538  num_threads = std::min(num_iterations, num_threads);
539  }
540  _gemm_kernel_asm->set_nthreads(num_threads);
541  }
542 
543  // Prepare assembly kernel
544  prepare(tensors);
545 
546  // Set up the matrix bias in the assembly kernel; it's just a pointer to matrix C.
547  TypeOutput *bias = nullptr;
548  if(c && c->info()->data_type() != DataType::S32)
549  {
550  bias = reinterpret_cast<TypeOutput *>(c->buffer() + c->info()->offset_first_element_in_bytes());
551  }
552 
553  if(_gemm_info.method == AsmConvMethod::Indirect)
554  {
555  in0_ptr = nullptr;
556  lda = 0;
557  batch_stride_a = 0;
558  multi_stride_a = 0;
559  }
560 
561  // Set gemm parameters
562  _gemm_kernel_asm->set_arrays(in0_ptr, lda, batch_stride_a, multi_stride_a,
563  in1_ptr, ldb, multi_stride_b,
564  out_ptr, ldd, batch_stride_d, multi_stride_d,
565  bias, 0);
566  // Schedule
567  NEScheduler::get().schedule(_optimised_kernel.get(), scheduling_hint);
568 }
569 
570 template <typename TypeInput, typename TypeOutput>
571 void create_arm_gemm(std::unique_ptr<CpuGemmAssemblyDispatch::IFallback> &arm_gemm,
572  const ITensorInfo *a, const ITensorInfo *b, const ITensorInfo *c, ITensorInfo *d,
573  arm_gemm::Activation activation, const AsmGemmInfo &info)
574 {
575  Params p = extract_parameters(a, b, d, info);
576  const CPUInfo &ci = NEScheduler::get().cpu_info();
577  unsigned int num_threads = NEScheduler::get().num_threads();
578 
579  arm_gemm::GemmArgs args(&ci, p.M, p.N, p.K, p.sections, p.batches, p.multis, p.indirect, activation, num_threads, info.fast_mode);
580 
581  // Create arm_gemm fallback
582  auto fallback = std::make_unique<Fallback<TypeInput, TypeOutput>>();
583  fallback->configure(a, b, c, d, args, info);
584  arm_gemm = std::move(fallback);
585 }
586 
587 template <typename TypeInput, typename TypeOutput>
588 void create_arm_gemm_quant(std::unique_ptr<CpuGemmAssemblyDispatch::IFallback> &arm_gemm,
589  const ITensorInfo *a, const ITensorInfo *b, const ITensorInfo *c, ITensorInfo *d,
590  arm_gemm::Activation activation, const AsmGemmInfo &info)
591 {
592  ARM_COMPUTE_UNUSED(activation);
593  Params p = extract_parameters(a, b, d, info);
594  const CPUInfo &ci = NEScheduler::get().cpu_info();
595  const unsigned int num_threads = NEScheduler::get().num_threads();
596 
597  arm_gemm::GemmArgs args(&ci, p.M, p.N, p.K, p.sections, p.batches, p.multis, p.indirect, activation, num_threads, info.fast_mode);
598 
599  // Create arm_gemm fallback
600  auto fallback = std::make_unique<Fallback<TypeInput, TypeOutput, arm_gemm::Requantize32>>();
601 
602  // Configure requantization info
603  const int32_t negation = info.negated_offsets ? 1 : -1;
604  const int32_t a_offset = -a->quantization_info().uniform().offset * negation;
605  const int32_t b_offset = -b->quantization_info().uniform().offset * negation;
606  const GEMMLowpOutputStageInfo os_info = info.output_stage;
607 
608  arm_gemm::Requantize32 gemm_requant_info{};
609  if(os_info.gemmlowp_shifts.size() > 1)
610  {
611  const auto requantize_data = fallback->set_requantize_data(os_info.gemmlowp_shifts, os_info.gemmlowp_multipliers);
612  gemm_requant_info = arm_gemm::Requantize32(nullptr, 0,
613  a_offset, b_offset, os_info.gemmlowp_offset,
614  (std::get<0>(requantize_data)) ? std::get<1>(requantize_data) : nullptr,
615  std::get<2>(requantize_data),
616  std::get<3>(requantize_data),
617  os_info.gemmlowp_min_bound, os_info.gemmlowp_max_bound);
618  }
619  else
620  {
621  gemm_requant_info = arm_gemm::Requantize32(nullptr, 0,
622  a_offset, b_offset, os_info.gemmlowp_offset,
623  -os_info.gemmlowp_shift, os_info.gemmlowp_multiplier,
624  os_info.gemmlowp_min_bound, os_info.gemmlowp_max_bound);
625  }
626 
627  // Configure fallback
628  fallback->configure(a, b, c, d, args, info, gemm_requant_info);
629  arm_gemm = std::move(fallback);
630 }
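// Worked example (illustrative quantization info): with a uniform offset of 10 on a, an offset of 3
// on b and info.negated_offsets = true (negation = 1), the code above passes a_offset = -10 and
// b_offset = -3 to arm_gemm::Requantize32; with negated_offsets = false the signs flip to +10 and +3.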
631 } //namespace
632 
633 CpuGemmAssemblyDispatch::CpuGemmAssemblyDispatch()
634  : _arm_gemm(nullptr)
635 {
636 }
637 
638 Status CpuGemmAssemblyDispatch::has_opt_impl(const ITensorInfo *a, const ITensorInfo *b, const ITensorInfo *c, const ITensorInfo *d, const AsmGemmInfo &info)
639 {
640  ARM_COMPUTE_ERROR_ON_NULLPTR(a, b, d);
641  ARM_COMPUTE_UNUSED(c);
642  arm_gemm::Activation act = assembly_utils::map_to_arm_gemm_activation(info.activation_info);
643  Params p = extract_parameters(a, b, d, info);
644  const CPUInfo &ci = NEScheduler::get().cpu_info();
645  unsigned int num_threads = NEScheduler::get().num_threads();
646 
647  arm_gemm::GemmArgs args(&ci, p.M, p.N, p.K, p.sections, p.batches, p.multis, p.indirect, act, num_threads, info.fast_mode);
648  switch(a->data_type())
649  {
650  case DataType::F32:
651  ARM_COMPUTE_RETURN_ERROR_ON_MSG(!(arm_gemm::has_opt_gemm<float, float, arm_gemm::Nothing>(args, {})),
652  "We could not find an optimized kernel for F32 input");
653  break;
654 #ifdef __aarch64__
655  case DataType::U8:
656  case DataType::QASYMM8:
657  if(d->data_type() == DataType::S32)
658  {
659  ARM_COMPUTE_RETURN_ERROR_ON_MSG(!(arm_gemm::has_opt_gemm<uint8_t, uint32_t, arm_gemm::Nothing>(args, {})),
660  "We could not find an optimized kernel for U8/QASYMM8 input and S32 output");
661  }
662  else
663  {
664  ARM_COMPUTE_RETURN_ERROR_ON_MSG(!(arm_gemm::has_opt_gemm<uint8_t, uint8_t, arm_gemm::Requantize32>(args, {})),
665  "We could not find an optimized kernel for U8 input and U8 output");
666  }
667  break;
668  case DataType::S8:
669  case DataType::QASYMM8_SIGNED:
670  if(d->data_type() == DataType::S32)
671  {
672  ARM_COMPUTE_RETURN_ERROR_ON_MSG(!(arm_gemm::has_opt_gemm<int8_t, int32_t, arm_gemm::Nothing>(args, {})),
673  "We could not find an optimized kernel for S8/QASYMM8_SIGNED input and S32 output");
674  }
675  else
676  {
677  ARM_COMPUTE_RETURN_ERROR_ON_MSG(!(arm_gemm::has_opt_gemm<int8_t, int8_t, arm_gemm::Requantize32>(args, {})),
678  "We could not find an optimized kernel for S8 input and S8 output");
679  }
680  break;
681 #endif /* __aarch64__ */
682 #if defined(__ARM_FEATURE_BF16_VECTOR_ARITHMETIC) || defined(ARM_COMPUTE_FORCE_BF16)
683  case DataType::BFLOAT16:
684  {
685  ARM_COMPUTE_RETURN_ERROR_ON_MSG(!(arm_gemm::has_opt_gemm<bfloat16, float, arm_gemm::Nothing>(args, {})),
686  "We could not find an optimized kernel for BFLOAT16 input and F32 output");
687  break;
688  }
689 #endif /* defined(__ARM_FEATURE_BF16_VECTOR_ARITHMETIC) || defined(ARM_COMPUTE_FORCE_BF16) */
690 #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
691  case DataType::F16:
692  ARM_COMPUTE_RETURN_ERROR_ON_MSG(!(arm_gemm::has_opt_gemm<float16_t, float16_t, arm_gemm::Nothing>(args, {})),
693  "We could not find an optimized kernel for F16 input and F16 output");
694  break;
695 #endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
696  default:
697  ARM_COMPUTE_RETURN_ERROR_ON_MSG(true, "Unsupported type. Could not find a kernel");
698  break;
699  }
700 
701  return Status{};
702 }
703 
704 Status CpuGemmAssemblyDispatch::validate(const ITensorInfo *a, const ITensorInfo *b, const ITensorInfo *c, const ITensorInfo *d, const AsmGemmInfo &info)
705 {
706  ARM_COMPUTE_UNUSED(c, info);
707  ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(a, b, d);
708  ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(a);
709  ARM_COMPUTE_RETURN_ERROR_ON_CPU_BF16_UNSUPPORTED(a);
710 
711 #ifndef __aarch64__
712  ARM_COMPUTE_RETURN_ERROR_ON_MSG(a->element_size() == 1, "8bit integer types only supported for aarch64");
713 #endif /* __aarch64__ */
714  ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(a, 1, DataType::U8, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::S8,
715  DataType::BFLOAT16, DataType::F16, DataType::F32);
716  ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(b, 1, DataType::U8, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::QSYMM8_PER_CHANNEL, DataType::S8,
717  DataType::BFLOAT16, DataType::F16, DataType::F32);
718  if(is_data_type_quantized_per_channel(b->data_type()))
719  {
720  ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(a, 1, DataType::QASYMM8_SIGNED, DataType::S8);
721  }
722  else
723  {
724  ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(a, b);
725  }
726  ARM_COMPUTE_RETURN_ERROR_ON_MSG(a->data_type() == DataType::F32 && d->data_type() != DataType::F32, "Only F32 output supported for F32 input");
727  ARM_COMPUTE_RETURN_ERROR_ON_MSG(a->data_type() == DataType::F16 && d->data_type() != DataType::F16, "Only F16 output supported for F16 input");
728  ARM_COMPUTE_RETURN_ERROR_ON_MSG(a->data_type() == DataType::BFLOAT16 && d->data_type() != DataType::F32, "Only F32 output supported for BFLOAT16 input");
729  ARM_COMPUTE_RETURN_ERROR_ON_MSG(a->data_type() == DataType::U8 && d->data_type() != DataType::U32, "Only U32 output supported for U8 input");
730  ARM_COMPUTE_RETURN_ERROR_ON_MSG(a->data_type() == DataType::S8 && d->data_type() != DataType::S32, "Only S32 output supported for S8 input");
731  ARM_COMPUTE_RETURN_ERROR_ON_MSG(a->data_type() == DataType::QASYMM8 && d->data_type() != DataType::QASYMM8, "Only QASYMM8 output supported for QASYMM8 input");
732  return CpuGemmAssemblyDispatch::has_opt_impl(a, b, c, d, info);
733 }
734 
735 bool CpuGemmAssemblyDispatch::is_activation_supported(const ActivationLayerInfo &activation)
736 {
737  arm_gemm::Activation act = assembly_utils::map_to_arm_gemm_activation(activation);
738  return act.type != arm_gemm::Activation::Type::None;
739 }
740 
741 void CpuGemmAssemblyDispatch::configure(const ITensorInfo *a, const ITensorInfo *b, const ITensorInfo *c, ITensorInfo *d, const AsmGemmInfo &info)
742 {
743  ARM_COMPUTE_ERROR_ON_NULLPTR(a, b, d);
744  arm_gemm::Activation act = assembly_utils::map_to_arm_gemm_activation(info.activation_info);
745 
746  //If we don't support a combination of data types, silently return: it is the caller's responsibility to check if configure() was successful via is_configured()
747  if(!CpuGemmAssemblyDispatch::validate(a, b, c, d, info))
748  {
749  return;
750  }
751 
752  switch(a->data_type())
753  {
754  case DataType::F32:
755  create_arm_gemm<float, float>(_arm_gemm, a, b, c, d, act, info);
756  break;
757 #ifdef __aarch64__
758  case DataType::U8:
759  case DataType::QASYMM8:
760  if(d->data_type() == DataType::S32)
761  {
762  create_arm_gemm<uint8_t, uint32_t>(_arm_gemm, a, b, c, d, act, info);
763  }
764  else
765  {
766  create_arm_gemm_quant<uint8_t, uint8_t>(_arm_gemm, a, b, c, d, act, info);
767  }
768  break;
769  case DataType::S8:
770  case DataType::QASYMM8_SIGNED:
771  if(d->data_type() == DataType::S32)
772  {
773  create_arm_gemm<int8_t, int32_t>(_arm_gemm, a, b, c, d, act, info);
774  }
775  else
776  {
777  create_arm_gemm_quant<int8_t, int8_t>(_arm_gemm, a, b, c, d, act, info);
778  }
779  break;
780 #endif /* __aarch64__ */
781 #if defined(__ARM_FEATURE_BF16_VECTOR_ARITHMETIC) || defined(ARM_COMPUTE_FORCE_BF16)
782  case DataType::BFLOAT16:
783  create_arm_gemm<bfloat16, float>(_arm_gemm, a, b, c, d, act, info);
784  break;
785 #endif /* defined(__ARM_FEATURE_BF16_VECTOR_ARITHMETIC) || defined(ARM_COMPUTE_FORCE_BF16) */
786 #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
787  case DataType::F16:
788  create_arm_gemm<float16_t, float16_t>(_arm_gemm, a, b, c, d, act, info);
789  break;
790 #endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
791  default:
792  break;
793  }
794 }
795 
796 void CpuGemmAssemblyDispatch::prepare(ITensorPack &tensors)
797 {
798  ARM_COMPUTE_ERROR_ON(_arm_gemm == nullptr);
799  _arm_gemm->prepare(tensors);
800 }
801 
802 bool CpuGemmAssemblyDispatch::is_configured() const
803 {
804  return _arm_gemm != nullptr && _arm_gemm->is_configured();
805 }
806 
807 void CpuGemmAssemblyDispatch::run(ITensorPack &tensors)
808 {
809  ARM_COMPUTE_ERROR_ON(_arm_gemm == nullptr);
810  _arm_gemm->run(tensors);
811 }
812 
813 experimental::MemoryRequirements CpuGemmAssemblyDispatch::workspace() const
814 {
815  ARM_COMPUTE_ERROR_ON(_arm_gemm == nullptr);
816  return _arm_gemm->workspace();
817 }
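// Typical call sequence (sketch only; tensor creation, allocation and auxiliary-memory handling are
// omitted and all variable names are illustrative):
//   CpuGemmAssemblyDispatch dispatch;
//   AsmGemmInfo             asm_info{};
//   dispatch.configure(a_info, b_info, nullptr, d_info, asm_info);
//   if(dispatch.is_configured())
//   {
//       ITensorPack pack{ { TensorType::ACL_SRC_0, a }, { TensorType::ACL_SRC_1, b }, { TensorType::ACL_DST, d } };
//       dispatch.prepare(pack);
//       dispatch.run(pack);
//   }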
818 } // namespace cpu
819 } // namespace arm_compute