Compute Library 21.02
NEGEMMAssemblyDispatch.cpp
1 /*
2  * Copyright (c) 2018-2021 Arm Limited.
3  *
4  * SPDX-License-Identifier: MIT
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a copy
7  * of this software and associated documentation files (the "Software"), to
8  * deal in the Software without restriction, including without limitation the
9  * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10  * sell copies of the Software, and to permit persons to whom the Software is
11  * furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in all
14  * copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22  * SOFTWARE.
23  */
25 
27 #include "src/core/CPP/Validate.h"
28 #include "src/core/NEON/kernels/assembly/NEGEMMAssemblyWrapperKernel.h"
29 #include "src/core/NEON/kernels/assembly/arm_gemm.hpp"
30 
31 #include <arm_neon.h>
32 #include <cstdlib>
33 
34 namespace arm_compute
35 {
36 namespace
37 {
38 struct free_delete
39 {
40  void operator()(void *x)
41  {
42  free(x);
43  }
44 };
45 
46 struct Params
47 {
48  unsigned int M;
49  unsigned int N;
50  unsigned int K;
51  unsigned int batches;
52  unsigned int multis;
53  unsigned int sections;
54  bool indirect;
55 };
56 
57 Params extract_parameters(const ITensor *a, const ITensor *b, const ITensor *d, const AsmGemmInfo &info)
58 {
59  ARM_COMPUTE_ERROR_ON_NULLPTR(a, b, d);
60 
61  Params p;
62  p.M = d->info()->tensor_shape().y();
63  p.K = a->info()->tensor_shape().x();
64  p.N = d->info()->tensor_shape().x();
65  p.batches = 1;
66  p.multis = 1;
67  p.sections = 1;
68  p.indirect = false;
69 
70  if(info.method == AsmConvMethod::Conv || info.method == AsmConvMethod::Indirect)
71  {
72  p.indirect = true;
73  p.sections = b->info()->tensor_shape()[2] * b->info()->tensor_shape()[3];
74  }
75  else
76  {
77  p.multis = b->info()->tensor_shape().z();
78  p.batches = d->info()->tensor_shape().total_size_upper(2) / p.multis; //COMPMID-1423: Agree on and document the layout of gemm inputs/outputs
79  }
80 
81  // Update M in case of GEMM3D for output
82  if(info.depth_output_gemm3d != 0)
83  {
84  p.M = d->info()->tensor_shape().y() * d->info()->tensor_shape().z();
85  p.batches = d->info()->tensor_shape().total_size_upper(3) / p.multis;
86  }
87 
88  return p;
89 }
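// Illustration of the mapping above (assumed shapes, plain GEMM path): with
// a of shape [K, M, batches * multis] and d of shape [N, M, batches * multis],
// p.M and p.N are read from the output, p.K from the input, p.multis from
// dimension 2 of b, and p.batches from the collapsed upper dimensions of d
// divided by p.multis (both are recomputed when the output is reinterpreted as
// 3D). For the Conv/Indirect path, p.sections instead counts kernel positions
// (dims 2 and 3 of b) and batches/multis keep their initial value of 1.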
90 
91 arm_gemm::Activation map_to_arm_gemm_activation(const ActivationLayerInfo &act)
92 {
93  arm_gemm::Activation gemm_act;
94 
95  // Early exit in case lower bound is other than 0, as it's not yet supported
96  if(act.b() != 0.f)
97  {
98  return gemm_act;
99  }
100 
101  switch(act.activation())
102  {
103  case ActivationLayerInfo::ActivationFunction::RELU:
104  gemm_act.type = arm_gemm::Activation::Type::ReLU;
105  break;
106  case ActivationLayerInfo::ActivationFunction::BOUNDED_RELU:
107  gemm_act.type = arm_gemm::Activation::Type::BoundedReLU;
108  gemm_act.param1 = act.a();
109  gemm_act.param2 = 0.f;
110  break;
111  case ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU:
112  gemm_act.type = arm_gemm::Activation::Type::BoundedReLU;
113  gemm_act.param1 = act.a();
114  gemm_act.param2 = act.b();
115  break;
116  default:
117  gemm_act.type = arm_gemm::Activation::Type::None;
118  }
119 
120  return gemm_act;
121 }
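// Example of the mapping above (assumed parameter values): BOUNDED_RELU with
// a = 6.f becomes arm_gemm BoundedReLU with param1 = 6.f and param2 = 0.f,
// while LU_BOUNDED_RELU with a = 6.f, b = 0.f yields BoundedReLU with both
// bounds set. Any activation whose lower bound b is non-zero returns a
// default-constructed Activation, i.e. no fused activation.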
122 
123 IScheduler::Hints scheduling_hint_heuristic(arm_gemm::GemmMethod method, DataType data_type)
124 {
125  // Schedule assembly kernel
126  const int granule_threshold = 200;
127  IScheduler::Hints scheduling_hint = IScheduler::Hints(Window::DimX);
128  if(method == arm_gemm::GemmMethod::GEMM_INTERLEAVED && data_type == DataType::F32)
129  {
130  scheduling_hint = IScheduler::Hints(Window::DimX, IScheduler::StrategyHint::DYNAMIC, granule_threshold);
131  }
132  else if(method == arm_gemm::GemmMethod::GEMM_INTERLEAVED_2D && (data_type == DataType::F32 || data_type == DataType::F16 || data_type == DataType::U8 || data_type == DataType::S8))
133  {
134  //GEMM_INTERLEAVED supports 2D parallelism, IScheduler::split_dimensions_all signals to parallelise over all window dimensions
135  scheduling_hint = IScheduler::Hints(IScheduler::split_dimensions_all, IScheduler::StrategyHint::STATIC, granule_threshold);
136  }
137  else if(method == arm_gemm::GemmMethod::QUANTIZE_WRAPPER_2D && (data_type == DataType::QASYMM8 || data_type == DataType::QASYMM8_SIGNED))
138  {
139  //special case for QASYMM8 to support 2D parallelism, scheduler here may be tweaked differently compared to FP32 case
140  scheduling_hint = IScheduler::Hints(IScheduler::split_dimensions_all, IScheduler::StrategyHint::STATIC, granule_threshold);
141  }
142 
143  return scheduling_hint;
144 }
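// In short (a reading of the heuristic above, not a separate policy): 1D
// interleaved FP32 GEMM is split dynamically along DimX; the 2D-capable
// interleaved and quantize-wrapper methods request a static split across all
// window dimensions; everything else falls back to the default DimX hint. The
// granule_threshold of 200 is passed to the scheduler as a granularity hint.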
145 
146 template <typename TypeInput, typename TypeOutput>
147 class FallbackTransform : public ITransformWeights
148 {
149 public:
150  FallbackTransform() noexcept {};
151  /** Prevent instances of this class from being copied (As this class contains pointers) */
152  FallbackTransform(const FallbackTransform &) = delete;
153  /** Default move constructor */
154  FallbackTransform(FallbackTransform &&) = default;
155  /** Prevent instances of this class from being copied (As this class contains pointers) */
156  FallbackTransform &operator=(const FallbackTransform &) = delete;
157  /** Default move assignment operator */
158  FallbackTransform &operator=(FallbackTransform &&) = default;
159  void run() override
160  {
161  _output.allocator()->allocate();
162  ARM_COMPUTE_ERROR_ON(_output.buffer() == nullptr);
163  _gemm_kernel_asm->pretranspose_B_array(_output.buffer(), _in1_ptr, _ldb, _multi_stride_b);
164  _reshape_run = true;
165  }
166 
167  void release() override
168  {
169  _output.allocator()->free();
170  }
171 
172  ITensor *get_weights() override
173  {
174  return &_output;
175  }
176 
177  uint32_t uid() override
178  {
179  uint32_t id = (_B_pretranspose_size | 0x80000000);
180  return id;
181  }
182 
183  void configure(size_t B_pretranspose_size, unsigned int alignment)
184  {
185  _output.allocator()->init(TensorInfo(TensorShape{ (B_pretranspose_size + alignment /* FIXME: remove alignment after COMPMID-1088 */) }, 1, DataType::S8), alignment);
186  _B_pretranspose_size = B_pretranspose_size;
187  }
188 
189  void set_pretranspose(ITensor *tensor)
190  {
191  if(!_reshape_run)
192  {
193  _gemm_kernel_asm->set_pretransposed_B_data(tensor->buffer());
194  }
195  }
196 
197  void set_args(const int ldb, const TypeInput *in1_ptr, const int multi_stride_b, std::shared_ptr<arm_gemm::GemmCommon<TypeInput, TypeOutput>> gemm_kernel_asm)
198  {
199  _ldb = ldb;
200  _in1_ptr = in1_ptr;
201  _multi_stride_b = multi_stride_b;
202  _gemm_kernel_asm = gemm_kernel_asm;
203  }
204 
205 private:
206  Tensor _output{};
207  int _ldb{};
208  const TypeInput *_in1_ptr{};
209  int _multi_stride_b{};
210  size_t _B_pretranspose_size{};
211  std::shared_ptr<arm_gemm::GemmCommon<TypeInput, TypeOutput>> _gemm_kernel_asm{ nullptr };
212 };
213 
214 /** Fallback in case ACL doesn't have a function */
215 template <typename TypeInput, typename TypeOutput, class OutputStage = arm_gemm::Nothing>
216 class Fallback : public NEGEMMAssemblyDispatch::IFallback
217 {
218 public:
219  /** Destructor */
220  ~Fallback()
221  {
222  // Release memory if we have allocated the memory ourselves
223  if(_pretranspose && !(_weights_manager && _weights_manager->are_weights_managed(_b)))
224  {
225  delete _pretranspose;
226  }
227  }
228 
229  /** Initialise the function's inputs and outputs.
230  *
231  * @param[in] a Input tensor containing the Matrix A.
232  * @param[in] b Input tensor containing the Matrix B.
233  * @param[in] c Input tensor containing the Matrix C.
234  * @param[out] d Output tensor to store the result of matrix multiplication.
235  * @param[in] args Matrix multiplication information.
236  * @param[in] gemm_info GEMM meta-data
237  * @param[in] memory_group Memory group to be used by the function.
238  * @param[in] weights_manager Weights manager to be used by the function.
239  * @param[in] os Output stage meta-data.
240  */
241  void configure(const ITensor *a, const ITensor *b, const ITensor *c, ITensor *d,
242  arm_gemm::GemmArgs args, const AsmGemmInfo &gemm_info,
243  MemoryGroup &memory_group, IWeightsManager *weights_manager, const OutputStage &os = {});
244 
245  /** Set requantization data to be used
246  *
247  * @param[in] shifts Requantization shifts
248  * @param[in] multipliers Requantization multipliers
249  *
250  * @return A tuple holding a flag that indicates whether left shifts are needed, followed by pointers to the left-shift, right-shift and multiplier data
251  */
259  std::tuple<bool, const int32_t *, const int32_t *, const int32_t *> set_requantize_data(const std::vector<int32_t> &shifts,
260  const std::vector<int32_t> &multipliers);
261 
262  // Inherited methods overridden:
263  void run() override;
264  void prepare() override;
265  bool is_configured() const override;
266 
267 private:
268  /** Allocate a workspace tensor.
269  *
270  * @param[in] workspace_size Size to allocate.
271  * @param[in] memory_group Tensor memory group.
272  * @param[in] alignment Workspace memory alignment.
273  */
274  void allocate_workspace(size_t workspace_size, MemoryGroup &memory_group, size_t alignment);
275  /** Configure the indirect buffer
276  *
277  * @param[in] a Input tensor containing the Matrix A.
278  * @param[in] b Input tensor containing the Matrix B.
279  * @param[out] d Output tensor to store the result of matrix multiplication.
280  * @param[in] info GEMM meta-data
281  */
282  void configure_indirect(const ITensorInfo *a, const ITensorInfo *b, const ITensorInfo *d, const AsmGemmInfo &info);
283  /** Prepare the indirect buffer */
284  void prepare_indirect_buffer();
285 
286  /** Assembly Gemm kernel */
287  std::shared_ptr<arm_gemm::GemmCommon<TypeInput, TypeOutput>> _gemm_kernel_asm{ nullptr };
288  /** Optimised Neon kernel */
289  std::unique_ptr<INEKernel> _optimised_kernel{ nullptr };
290  /** Input A */
291  const ITensor *_a
292  {
293  nullptr
294  };
295  /** Input B */
296  const ITensor *_b
297  {
298  nullptr
299  };
300  const ITensor *_c
301  {
302  nullptr
303  };
304  /** Output */
305  ITensor *_d{ nullptr };
306  /** GEMM workspace */
307  Tensor _workspace{};
308  /** Pre-transpose tensor */
309  ITensor *_pretranspose{ nullptr };
310  /** Prepared flag */
311  bool _is_prepared{ false };
312  /** GEMM meta-data */
313  AsmGemmInfo _gemm_info{};
314  /** Weights manager */
315  IWeightsManager *_weights_manager{ nullptr };
316  /** Weights transform object */
317  FallbackTransform<TypeInput, TypeOutput> _weights_transform{};
318  /** GEMM kernel description */
319  arm_gemm::KernelDescription _kernel_info{};
320  /** Per channel quantization shifts */
321  std::vector<int32_t> _shifts{};
322  std::vector<int32_t> right_shifts{};
323  std::vector<int32_t> left_shifts{};
324  /** Per channel quantization multipliers */
325  std::vector<int32_t> _multipliers{};
326  /** Indirect buffer */
327  std::unique_ptr<const TypeInput *const *, free_delete> _indirect_arg{};
328  std::unique_ptr<const TypeInput *, free_delete> _indirect_buf{};
329  std::vector<TypeInput> _indirect_pad{};
330  arm_gemm::ConvolutionParameters _cp{};
331 };
332 
333 template <typename TypeInput, typename TypeOutput, class OutputStage>
334 std::tuple<bool, const int32_t *, const int32_t *, const int32_t *>
335 Fallback<TypeInput, TypeOutput, OutputStage>::set_requantize_data(const std::vector<int32_t> &shifts, const std::vector<int32_t> &multipliers)
336 {
337  _multipliers = multipliers;
338  _shifts = shifts;
339  bool need_left = false;
340  for(const auto s : _shifts)
341  {
342  left_shifts.push_back(std::max(-s, int32_t(0)));
343  right_shifts.push_back(std::min(-s, int32_t(0)));
344  if(s < 0 && !need_left)
345  {
346  need_left = true;
347  }
348  }
349  return std::make_tuple(need_left, left_shifts.data(), right_shifts.data(), _multipliers.data());
350 }
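// Worked example for the shift split above (illustrative values): shifts = {3, -2}
// gives left_shifts = {0, 2}, right_shifts = {-3, 0} and need_left = true, so the
// caller passes the left-shift array to arm_gemm::Requantize32; when every input
// shift is non-negative, need_left stays false and create_arm_gemm_quant() below
// passes nullptr for the left shifts instead.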
351 
352 template <typename TypeInput, typename TypeOutput, class OutputStage>
353 void Fallback<TypeInput, TypeOutput, OutputStage>::prepare_indirect_buffer()
354 {
355  const TypeInput *A_ptr = reinterpret_cast<TypeInput *>(_a->buffer());
356  const int multis = 1;
357  const int batches = _a->info()->tensor_shape().total_size_upper(3);
358  const size_t stride_A = _a->info()->strides_in_bytes().y() / sizeof(TypeInput);
359  const size_t batch_stride_A = _a->info()->strides_in_bytes()[3] / sizeof(TypeInput);
360  const size_t multi_stride_A = _a->info()->strides_in_bytes()[4] / sizeof(TypeInput);
361 
362  const size_t output_hw = _cp.output_height * _cp.output_width;
363  const int batch_size = _cp.kernel_height * _cp.kernel_width * output_hw * sizeof(TypeInput);
364  const size_t batch_stride = batch_size / sizeof(TypeInput);
365  const int multi_size = batch_size * batches;
366  const size_t multi_stride = multi_size / sizeof(TypeInput);
367 
368  for(int64_t m = 0; m < multis; m++)
369  {
370  for(int64_t b = 0; b < batches; b++)
371  {
372  for(int64_t output_y = 0; output_y < _cp.output_height; output_y++)
373  {
374  for(int64_t output_x = 0; output_x < _cp.output_width; output_x++)
375  {
376  int64_t output_xy = (output_y * _cp.output_width) + output_x;
377 
378  for(int64_t kernel_y = 0; kernel_y < _cp.kernel_height; kernel_y++)
379  {
380  for(int64_t kernel_x = 0; kernel_x < _cp.kernel_width; kernel_x++)
381  {
382  int64_t input_x = (output_x * _cp.output_stride_w) + kernel_x - _cp.padding_left;
383  int64_t input_y = (output_y * _cp.output_stride_h) + kernel_y - _cp.padding_top;
384  int64_t kernel_xy = (kernel_y * _cp.kernel_width) + kernel_x;
385  int64_t input_xy = (input_y * _cp.input_width) + input_x;
386 
387  if(input_x < 0 || input_x >= _cp.input_width || input_y < 0 || input_y >= _cp.input_height)
388  {
389  _indirect_buf.get()[m * multi_stride + b * batch_stride + kernel_xy * output_hw + output_xy] = _indirect_pad.data();
390  }
391  else
392  {
393  _indirect_buf.get()[m * multi_stride + b * batch_stride + kernel_xy * output_hw + output_xy] =
394  A_ptr + (m * multi_stride_A + b * batch_stride_A + input_xy * stride_A);
395  }
396  }
397  }
398  }
399  }
400  }
401  }
402 }
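// The buffer filled above is the indirect A operand: for each (multi, batch,
// kernel position, output point) it holds a pointer to the start of the input
// row that the convolution would read, or a pointer into _indirect_pad when the
// sampling position falls inside the padding region. In effect the assembly
// kernel gathers its input rows through this table instead of performing an
// explicit im2col copy.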
403 
404 template <typename TypeInput, typename TypeOutput, class OutputStage>
405 void Fallback<TypeInput, TypeOutput, OutputStage>::configure_indirect(const ITensorInfo *a, const ITensorInfo *b, const ITensorInfo *d, const AsmGemmInfo &info)
406 {
407  ARM_COMPUTE_ERROR_ON(!(info.method == AsmConvMethod::Conv || info.method == AsmConvMethod::Indirect));
408 
409  float zeropad = 0.f;
410  if(is_data_type_quantized(a->data_type()))
411  {
412  zeropad = a->quantization_info().uniform().offset;
413  }
414 
415  const int64_t input_width = static_cast<int64_t>(a->tensor_shape()[1]);
416  const int64_t input_height = static_cast<int64_t>(a->tensor_shape()[2]);
417  const int64_t input_channels = static_cast<int64_t>(a->tensor_shape()[0]);
418  const int64_t kernel_width = static_cast<int64_t>(b->tensor_shape()[2]);
419  const int64_t kernel_height = static_cast<int64_t>(b->tensor_shape()[3]);
420  const int64_t output_width = static_cast<int64_t>(d->tensor_shape()[1]);
421  const int64_t output_height = static_cast<int64_t>(d->tensor_shape()[2]);
422 
423  _cp = { input_width, input_height, input_channels, kernel_width, kernel_height, output_width, output_height,
424  info.ps_info.stride().first, info.ps_info.stride().second, info.padding_top, info.padding_left, zeropad
425  };
426 
427  if(info.method == AsmConvMethod::Conv)
428  {
429  _gemm_kernel_asm->set_convolution_parameters(_cp);
430  }
431 
432  if(info.method == AsmConvMethod::Indirect)
433  {
434  const unsigned int multis = 1;
435  const unsigned int batches = a->tensor_shape().total_size_upper(3);
436  const unsigned int kernel_hw = _cp.kernel_width * _cp.kernel_height;
437  const unsigned int output_hw = _cp.output_width * _cp.output_height;
438 
439  using TypeInputPtr = TypeInput *;
440  const int batch_size = kernel_hw * output_hw * sizeof(TypeInputPtr);
441  const size_t batch_stride = batch_size / sizeof(TypeInputPtr);
442  const int multi_size = batch_size * batches;
443  const size_t multi_stride = multi_size / sizeof(TypeInputPtr);
444 
445  _indirect_buf = std::unique_ptr<const TypeInput *, free_delete>(reinterpret_cast<const TypeInput **>(malloc(multi_size * multis)));
446  _indirect_arg = std::unique_ptr<const TypeInput *const *, free_delete>(reinterpret_cast<const TypeInput *const **>(malloc(sizeof(TypeInput **) * kernel_hw * multis * batches)));
447  _indirect_pad = std::vector<TypeInput>(_cp.input_channels, TypeInput(zeropad));
448 
449  // Set indirect argument
450  int64_t pos = 0;
451  for(int64_t m = 0; m < multis; m++)
452  {
453  for(int64_t b = 0; b < batches; b++)
454  {
455  for(int64_t kernel_xy = 0; kernel_xy < kernel_hw; kernel_xy++)
456  {
457  (_indirect_arg.get())[pos++] = _indirect_buf.get() + m * multi_stride + b * batch_stride + kernel_xy * output_hw;
458  }
459  }
460  }
461 
462  _gemm_kernel_asm->set_indirect_parameters(a->tensor_shape()[0], _indirect_arg.get());
463  }
464 }
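// Note on the two allocations above: _indirect_buf holds the per-output-point
// row pointers filled in by prepare_indirect_buffer(), while _indirect_arg is a
// small table of pointers into _indirect_buf, one per (multi, batch, kernel
// position), which is what arm_gemm consumes via set_indirect_parameters(). For
// the AsmConvMethod::Conv path only the convolution parameters are passed and no
// indirect buffers are built.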
465 
466 template <typename TypeInput, typename TypeOutput, class OutputStage>
467 void Fallback<TypeInput, TypeOutput, OutputStage>::configure(const ITensor *a, const ITensor *b, const ITensor *c, ITensor *d,
468  arm_gemm::GemmArgs args, const AsmGemmInfo &gemm_info,
469  MemoryGroup &memory_group, IWeightsManager *weights_manager, const OutputStage &os)
470 {
471  arm_gemm::GemmConfig gemm_cfg;
472  _kernel_info = arm_gemm::get_gemm_method<TypeInput, TypeOutput, OutputStage>(args, os);
473  _weights_manager = weights_manager;
474  if(_kernel_info.method != arm_gemm::GemmMethod::GEMV_BATCHED)
475  {
476  gemm_cfg.filter = _kernel_info.name;
477  args._cfg = &gemm_cfg;
478  }
479  _gemm_kernel_asm = arm_gemm::gemm<TypeInput, TypeOutput, OutputStage>(args, os);
480  if(_gemm_kernel_asm == nullptr)
481  {
482  // Configuration not supported: leave the function unconfigured
483  return;
484  }
485 
486  // arm_compute wrapper for the Gemm object (see above)
487  std::unique_ptr<NEGEMMAssemblyWrapperKernel<TypeInput, TypeOutput>> acl_gemm_wrapper = std::make_unique<NEGEMMAssemblyWrapperKernel<TypeInput, TypeOutput>>();
488  ARM_COMPUTE_ERROR_ON(acl_gemm_wrapper == nullptr);
489  acl_gemm_wrapper->configure(_gemm_kernel_asm.get(), gemm_cfg.filter);
490  const size_t workspace_size = _gemm_kernel_asm->get_working_size();
491  if(workspace_size > 0)
492  {
493  // Allocate workspace
494  const unsigned int alignment = 4096;
495  allocate_workspace(workspace_size, memory_group, alignment);
496  }
497 
498 // If the braced block below is disabled, ConvLayer deadlocks when threads > 1 and
499 // the shapes are In=1x1x1024 Weights=1x1x1024x1001 Biases=1001 Out=1x1x1001
500  {
501  const unsigned int window_size = _gemm_kernel_asm->get_window_size().total_size();
502  if(window_size < static_cast<unsigned int>(args._maxthreads))
503  {
504  _gemm_kernel_asm->set_nthreads(window_size);
505  }
506  }
507 
508  _optimised_kernel = std::move(acl_gemm_wrapper);
509  _a = a;
510  _b = b;
511  _c = c;
512  _d = d;
513  _gemm_info = gemm_info;
514  // Check for pre-transposed support
515  if(_gemm_kernel_asm->B_pretranspose_required())
516  {
517  // Forcing 128-byte alignment (required by 32-bit kernels)
518  const unsigned int alignment = 128;
519  const size_t B_pretranspose_size = _gemm_kernel_asm->get_B_pretransposed_array_size();
520  if(weights_manager && _weights_manager->are_weights_managed(b))
521  {
522  _weights_transform.configure(B_pretranspose_size, alignment);
523  _pretranspose = _weights_manager->acquire(b, &_weights_transform);
524  }
525  else
526  {
527  _pretranspose = new Tensor();
528  static_cast<Tensor *>(_pretranspose)->allocator()->init(TensorInfo(TensorShape{ (B_pretranspose_size + alignment /* FIXME: remove alignment after COMPMID-1088 */) }, 1, DataType::S8), alignment);
529  }
530  }
531 
532  // Handle indirect GEMM convolution
533  if(gemm_info.method == AsmConvMethod::Conv || gemm_info.method == AsmConvMethod::Indirect)
534  {
535  configure_indirect(a->info(), b->info(), d->info(), gemm_info);
536  }
537 }
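// Roughly, configure() above: (1) asks arm_gemm for the best kernel for the
// given GemmArgs, (2) wraps it in an NEGEMMAssemblyWrapperKernel so it can be
// scheduled like any other NEON kernel, (3) reserves the workspace and, if
// required, the pre-transposed B storage, and (4) sets up the indirect
// description for the convolution methods. The heavy work (pre-transposing B,
// filling the indirect buffer) is deferred to prepare().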
538 
539 template <typename TypeInput, typename TypeOutput, class OutputStage>
540 void Fallback<TypeInput, TypeOutput, OutputStage>::prepare()
541 {
542  if(!_is_prepared)
543  {
544  // Set up the matrix bias in the assembly kernel; it's just a pointer to matrix C.
545  if(_c && _c->info()->data_type() == DataType::S32)
546  {
547  _gemm_kernel_asm->set_quantized_bias(reinterpret_cast<const int32_t *>(_c->buffer() + _c->info()->offset_first_element_in_bytes()), 0);
548  }
549 
550  // Pretranspose B if required
551  if(_gemm_kernel_asm->B_pretranspose_required())
552  {
553  const int ldb = _b->info()->strides_in_bytes().y() / sizeof(TypeInput);
554  const auto in1_ptr = reinterpret_cast<const TypeInput *>(_b->buffer() + _b->info()->offset_first_element_in_bytes());
555  const int multi_stride_b = _b->info()->strides_in_bytes().z() / sizeof(TypeInput);
556 
557  if(_weights_manager && _weights_manager->are_weights_managed(_b))
558  {
559  _weights_transform.set_args(ldb, in1_ptr, multi_stride_b, _gemm_kernel_asm);
560  _weights_manager->run(_b, &_weights_transform);
561 
562  // If we didn't run the reshape function, set the pretransposed buffer
563  if(!_weights_transform.is_reshape_run())
564  {
565  _weights_transform.set_pretranspose(_pretranspose);
566  }
567  }
568  else
569  {
570  static_cast<Tensor *>(_pretranspose)->allocator()->allocate();
571  ARM_COMPUTE_ERROR_ON(_pretranspose->buffer() == nullptr);
572  _gemm_kernel_asm->pretranspose_B_array(_pretranspose->buffer(), in1_ptr, ldb, multi_stride_b);
573  _b->mark_as_unused();
574  }
575  }
576 
577  if(_gemm_info.method == AsmConvMethod::Indirect)
578  {
579  prepare_indirect_buffer();
580  }
581 
582  _is_prepared = true;
583  }
584 }
585 
586 template <typename TypeInput, typename TypeOutput, class OutputStage>
587 void Fallback<TypeInput, TypeOutput, OutputStage>::allocate_workspace(size_t workspace_size, MemoryGroup &memory_group, size_t alignment)
588 {
589  ARM_COMPUTE_ERROR_ON_MSG(workspace_size == 0, "size cannot be 0");
590  _workspace.allocator()->init(TensorInfo(TensorShape{ (workspace_size + alignment /* FIXME: remove alignment after COMPMID-1088 */) }, 1, DataType::S8), alignment);
591  memory_group.manage(&_workspace);
592  _workspace.allocator()->allocate();
593 }
594 
595 template <typename TypeInput, typename TypeOutput, class OutputStage>
596 bool Fallback<TypeInput, TypeOutput, OutputStage>::is_configured() const
597 {
598  return _optimised_kernel != nullptr;
599 }
600 
601 template <typename TypeInput, typename TypeOutput, class OutputStage>
602 void Fallback<TypeInput, TypeOutput, OutputStage>::run()
603 {
604  int lda = _a->info()->strides_in_bytes().y() / sizeof(TypeInput);
605  int ldb = 0;
606  const int ldd = _d->info()->strides_in_bytes().y() / sizeof(TypeOutput);
607 
608  const size_t a_batch_idx = _gemm_info.reinterpret_input_as_3d != 0 ? 3 : 2;
609  const size_t a_multi_idx = a_batch_idx + 1;
610  const size_t d_batch_idx = _gemm_info.depth_output_gemm3d != 0 ? 3 : 2;
611  const size_t d_multi_idx = d_batch_idx + 1;
612 
613  int batch_stride_a = _a->info()->strides_in_bytes()[a_batch_idx] / sizeof(TypeInput);
614  const int batch_stride_d = _d->info()->strides_in_bytes()[d_batch_idx] / sizeof(TypeOutput);
615 
616  int multi_stride_a = _a->info()->strides_in_bytes()[a_multi_idx] / sizeof(TypeInput);
617  int multi_stride_b = 0;
618  const int multi_stride_d = _d->info()->strides_in_bytes()[d_multi_idx] / sizeof(TypeOutput);
619 
620  auto in0_ptr = reinterpret_cast<const TypeInput *>(_a->buffer() + _a->info()->offset_first_element_in_bytes());
621  const TypeInput *in1_ptr = nullptr;
622  auto out_ptr = reinterpret_cast<TypeOutput *>(_d->buffer() + _d->info()->offset_first_element_in_bytes());
623 
624  // Check if B is pre-transposed and de-reference if not
625  if(!_gemm_kernel_asm->B_is_pretransposed())
626  {
627  ldb = _b->info()->strides_in_bytes().y() / sizeof(TypeInput);
628  multi_stride_b = _b->info()->strides_in_bytes().z() / sizeof(TypeInput);
629  in1_ptr = reinterpret_cast<const TypeInput *>(_b->buffer() + _b->info()->offset_first_element_in_bytes());
630  }
631 
632  const auto scheduling_hint = scheduling_hint_heuristic(_kernel_info.method, _d->info()->data_type());
633 
634  // Set workspace if needed and reset number of threads as buffer manager gets re-created with max_threads
635  if(_workspace.buffer() != nullptr)
636  {
637  _gemm_kernel_asm->set_working_space(reinterpret_cast<void *>(_workspace.buffer()));
638  const unsigned int split_dim = scheduling_hint.split_dimension();
639  const unsigned int window_size = _gemm_kernel_asm->get_window_size().total_size();
640  unsigned int num_threads = NEScheduler::get().num_threads();
641  if(window_size < num_threads)
642  {
643  num_threads = window_size;
644  }
645  if(split_dim != IScheduler::split_dimensions_all)
646  {
647  // Make sure the kernel does not expect more threads than we can actually spawn
648  const unsigned int num_iterations = _optimised_kernel.get()->window().num_iterations(split_dim);
649  num_threads = std::min(num_iterations, num_threads);
650  }
651  _gemm_kernel_asm->set_nthreads(num_threads);
652  }
653 
654  // Prepare assembly kernel
655  prepare();
656 
657  // Set up the matrix bias in the assembly kernel; it's just a pointer to matrix C.
658  TypeOutput *bias = nullptr;
659  if(_c && _c->info()->data_type() != DataType::S32)
660  {
661  bias = reinterpret_cast<TypeOutput *>(_c->buffer() + _c->info()->offset_first_element_in_bytes());
662  }
663 
664  if(_gemm_info.method == AsmConvMethod::Indirect)
665  {
666  in0_ptr = nullptr;
667  lda = 0;
668  batch_stride_a = 0;
669  multi_stride_a = 0;
670  }
671 
672  // Set gemm parameters
673  _gemm_kernel_asm->set_arrays(in0_ptr, lda, batch_stride_a, multi_stride_a,
674  in1_ptr, ldb, multi_stride_b,
675  out_ptr, ldd, batch_stride_d, multi_stride_d,
676  bias, 0);
677  // Schedule
678  NEScheduler::get().schedule(_optimised_kernel.get(), scheduling_hint);
679 }
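// Note: every leading dimension and stride passed to set_arrays() above is in
// elements, hence the divisions of strides_in_bytes() by sizeof(TypeInput) /
// sizeof(TypeOutput). For the Indirect method the A pointer and its strides are
// zeroed because the kernel reads A through the indirect argument prepared in
// configure_indirect() instead.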
680 
681 template <typename TypeInput, typename TypeOutput>
682 void create_arm_gemm(std::unique_ptr<NEGEMMAssemblyDispatch::IFallback> &arm_gemm, MemoryGroup &memory_group,
683  const ITensor *a, const ITensor *b, const ITensor *c, ITensor *d, arm_gemm::Activation activation, const AsmGemmInfo &info,
684  IWeightsManager *weights_manager)
685 {
686  Params p = extract_parameters(a, b, d, info);
687  const CPUInfo &ci = NEScheduler::get().cpu_info();
688  unsigned int num_threads = NEScheduler::get().num_threads();
689 
690  arm_gemm::GemmArgs args(&ci, p.M, p.N, p.K, p.sections, p.batches, p.multis, p.indirect, activation, num_threads);
691 
692  // Create arm_gemm fallback
693  auto fallback = std::make_unique<Fallback<TypeInput, TypeOutput>>();
694  fallback->configure(a, b, c, d, args, info, memory_group, weights_manager);
695  arm_gemm = std::move(fallback);
696 }
697 
698 template <typename TypeInput, typename TypeOutput>
699 void create_arm_gemm_quant(std::unique_ptr<NEGEMMAssemblyDispatch::IFallback> &arm_gemm, MemoryGroup &memory_group,
700  const ITensor *a, const ITensor *b, const ITensor *c, ITensor *d, arm_gemm::Activation activation, const AsmGemmInfo &info,
701  IWeightsManager *weights_manager)
702 {
703  ARM_COMPUTE_UNUSED(activation);
704  Params p = extract_parameters(a, b, d, info);
705  const CPUInfo &ci = NEScheduler::get().cpu_info();
706  unsigned int num_threads = NEScheduler::get().num_threads();
707 
708  arm_gemm::GemmArgs args(&ci, p.M, p.N, p.K, p.sections, p.batches, p.multis, p.indirect, activation, num_threads);
709 
710  // Create arm_gemm fallback
711  auto fallback = std::make_unique<Fallback<TypeInput, TypeOutput, arm_gemm::Requantize32>>();
712 
713  // Configure requantization info
714  const int32_t negation = info.negated_offsets ? 1 : -1;
715  const int32_t a_offset = -a->info()->quantization_info().uniform().offset * negation;
716  const int32_t b_offset = -b->info()->quantization_info().uniform().offset * negation;
717  const GEMMLowpOutputStageInfo os_info = info.output_stage;
718 
719  arm_gemm::Requantize32 gemm_requant_info{};
720  if(os_info.gemmlowp_shifts.size() > 1)
721  {
722  const auto requantize_data = fallback->set_requantize_data(os_info.gemmlowp_shifts, os_info.gemmlowp_multipliers);
723  gemm_requant_info = arm_gemm::Requantize32(nullptr, 0,
724  a_offset, b_offset, os_info.gemmlowp_offset,
725  (std::get<0>(requantize_data)) ? std::get<1>(requantize_data) : nullptr,
726  std::get<2>(requantize_data),
727  std::get<3>(requantize_data),
728  os_info.gemmlowp_min_bound, os_info.gemmlowp_max_bound);
729  }
730  else
731  {
732  gemm_requant_info = arm_gemm::Requantize32(nullptr, 0,
733  a_offset, b_offset, os_info.gemmlowp_offset,
734  -os_info.gemmlowp_shift, os_info.gemmlowp_multiplier,
735  os_info.gemmlowp_min_bound, os_info.gemmlowp_max_bound);
736  }
737 
738  // Configure fallback
739  fallback->configure(a, b, c, d, args, info, memory_group, weights_manager, gemm_requant_info);
740  arm_gemm = std::move(fallback);
741 }
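// Illustrative offset handling for the code above (assumed quantization values):
// with negated_offsets == true and uniform offsets of 10 for A and 5 for B, the
// Requantize32 stage receives a_offset = -10 and b_offset = -5, i.e. the
// zero-point corrections are folded into the output stage rather than applied to
// the raw int8/uint8 data. Per-channel requantization is selected whenever
// os_info.gemmlowp_shifts holds more than one entry.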
742 
743 } //namespace
744 
745 NEGEMMAssemblyDispatch::NEGEMMAssemblyDispatch(std::shared_ptr<IMemoryManager> memory_manager, IWeightsManager *weights_manager)
746  : _arm_gemm(nullptr), _memory_group(std::move(memory_manager)), _weights_manager(weights_manager)
747 {
748 }
749 
750 Status NEGEMMAssemblyDispatch::validate(const ITensorInfo *a, const ITensorInfo *b, const ITensorInfo *c, const ITensorInfo *d, const AsmGemmInfo &info)
751 {
752  ARM_COMPUTE_UNUSED(c, info);
753  ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(a, b, d);
754  ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(a);
755  ARM_COMPUTE_RETURN_ERROR_ON_CPU_BF16_UNSUPPORTED(a);
756 
757 #ifndef __aarch64__
758  ARM_COMPUTE_RETURN_ERROR_ON_MSG(a->element_size() == 1, "8bit integer types only supported for aarch64");
759 #endif /* __aarch64__ */
760  ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(a, 1, DataType::U8, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::S8, DataType::BFLOAT16, DataType::F16, DataType::F32);
762  ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(b, 1, DataType::U8, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::QSYMM8_PER_CHANNEL, DataType::S8, DataType::BFLOAT16, DataType::F16, DataType::F32);
764  if(is_data_type_quantized_per_channel(b->data_type()))
765  {
766  ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(a, 1, DataType::QASYMM8_SIGNED, DataType::S8);
767  }
768  else
769  {
770  ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(a, b);
771  }
772  ARM_COMPUTE_RETURN_ERROR_ON_MSG(a->data_type() == DataType::F32 && d->data_type() != DataType::F32, "Only F32 output supported for F32 input");
773  ARM_COMPUTE_RETURN_ERROR_ON_MSG(a->data_type() == DataType::F16 && d->data_type() != DataType::F16, "Only F16 output supported for F16 input");
774  ARM_COMPUTE_RETURN_ERROR_ON_MSG(a->data_type() == DataType::BFLOAT16 && d->data_type() != DataType::F32, "Only F32 output supported for BFLOAT16 input");
775  ARM_COMPUTE_RETURN_ERROR_ON_MSG(a->data_type() == DataType::U8 && d->data_type() != DataType::U32, "Only U32 output supported for U8 input");
776  ARM_COMPUTE_RETURN_ERROR_ON_MSG(a->data_type() == DataType::S8 && d->data_type() != DataType::S32, "Only S32 output supported for S8 input");
777  ARM_COMPUTE_RETURN_ERROR_ON_MSG(a->data_type() == DataType::QASYMM8 && d->data_type() != DataType::QASYMM8, "Only QASYMM8 output supported for QASYMM8 input");
778  return Status{};
779 }
780 
781 bool NEGEMMAssemblyDispatch::is_activation_supported(const ActivationLayerInfo &activation)
782 {
783  arm_gemm::Activation act = map_to_arm_gemm_activation(activation);
784  return act.type != arm_gemm::Activation::Type::None;
785 }
786 
787 void NEGEMMAssemblyDispatch::configure(const ITensor *a, const ITensor *b, const ITensor *c, ITensor *d, const AsmGemmInfo &info)
788 {
789  ARM_COMPUTE_ERROR_ON_NULLPTR(a, b, d);
790  arm_gemm::Activation act = map_to_arm_gemm_activation(info.activation_info);
791 
792  //If we don't support a combination of data types, silently return: it is the caller's responsibility to check if configure() was successful via is_configured()
793  if(!NEGEMMAssemblyDispatch::validate(a->info(), b->info(), c != nullptr ? c->info() : nullptr, d->info(), info))
794  {
795  return;
796  }
797 
798  switch(a->info()->data_type())
799  {
800  case DataType::F32:
801  create_arm_gemm<float, float>(_arm_gemm, _memory_group, a, b, c, d, act, info, _weights_manager);
802  break;
803 #ifdef __aarch64__
804  case DataType::U8:
805  case DataType::QASYMM8:
806  if(d->info()->data_type() == DataType::S32)
807  {
808  create_arm_gemm<uint8_t, uint32_t>(_arm_gemm, _memory_group, a, b, c, d, act, info, _weights_manager);
809  }
810  else
811  {
812  create_arm_gemm_quant<uint8_t, uint8_t>(_arm_gemm, _memory_group, a, b, c, d, act, info, _weights_manager);
813  }
814  break;
815  case DataType::S8:
816  case DataType::QASYMM8_SIGNED:
817  if(d->info()->data_type() == DataType::S32)
818  {
819  create_arm_gemm<int8_t, int32_t>(_arm_gemm, _memory_group, a, b, c, d, act, info, _weights_manager);
820  }
821  else
822  {
823  create_arm_gemm_quant<int8_t, int8_t>(_arm_gemm, _memory_group, a, b, c, d, act, info, _weights_manager);
824  }
825  break;
826 #endif /* __aarch64__ */
827 #if defined(__ARM_FEATURE_BF16_VECTOR_ARITHMETIC) || defined(ARM_COMPUTE_FORCE_BF16)
828  case DataType::BFLOAT16:
829  create_arm_gemm<bfloat16, float>(_arm_gemm, _memory_group, a, b, c, d, act, info, _weights_manager);
830  break;
831 #endif /* defined(__ARM_FEATURE_BF16_VECTOR_ARITHMETIC) || defined(ARM_COMPUTE_FORCE_BF16) */
832 #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
833  case DataType::F16:
834  create_arm_gemm<float16_t, float16_t>(_arm_gemm, _memory_group, a, b, c, d, act, info, _weights_manager);
835  break;
836 #endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
837  default:
838  break;
839  }
840 }
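// A minimal usage sketch (hypothetical tensors and default AsmGemmInfo, not taken
// from the original source):
//
//   NEGEMMAssemblyDispatch asm_gemm{};
//   AsmGemmInfo            asm_info{};
//   asm_gemm.configure(&a, &b, nullptr, &d, asm_info);
//   if(asm_gemm.is_configured())
//   {
//       asm_gemm.prepare(); // one-off work: B pre-transpose, indirect buffers
//       asm_gemm.run();     // schedules the wrapped assembly kernel
//   }
//
// configure() silently leaves the object unconfigured for unsupported data-type
// combinations, which is why is_configured() must be checked before run().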
841 
842 void NEGEMMAssemblyDispatch::prepare()
843 {
844  ARM_COMPUTE_ERROR_ON(_arm_gemm == nullptr);
845  _arm_gemm->prepare();
846 }
847 
848 bool NEGEMMAssemblyDispatch::is_configured() const
849 {
850  return _arm_gemm != nullptr && _arm_gemm->is_configured();
851 }
852 
853 void NEGEMMAssemblyDispatch::run()
854 {
855  MemoryGroupResourceScope scope_mg(_memory_group);
856 
857  ARM_COMPUTE_ERROR_ON(_arm_gemm == nullptr);
858  _arm_gemm->run();
859 }
860 } //namespace arm_compute