Compute Library
 22.05
gemm_hybrid_indirect.hpp
1 /*
2  * Copyright (c) 2017-2022 Arm Limited.
3  *
4  * SPDX-License-Identifier: MIT
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a copy
7  * of this software and associated documentation files (the "Software"), to
8  * deal in the Software without restriction, including without limitation the
9  * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10  * sell copies of the Software, and to permit persons to whom the Software is
11  * furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in all
14  * copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22  * SOFTWARE.
23  */
24 #pragma once
25 
26 #if !defined(_WIN64) && !defined(__OpenBSD__)
27 #include <alloca.h>
28 #endif /* !defined(_WIN64) && !defined(__OpenBSD__) */
29 
30 #include <algorithm>
31 #include <cassert>
32 
33 #include "arm_gemm.hpp"
34 #include "bias_adder.hpp"
35 #include "convolver.hpp"
36 #include "ndrange.hpp"
38 #include "transform.hpp"
39 #include "utils.hpp"
40 
41 #ifdef CYCLE_PROFILING
42 #include "profiler.hpp"
43 #endif
44 
45 #ifndef UNUSED
46 #define __I_DEFINED_UNUSED
47 #define UNUSED(x) ((void)(x))
48 #endif
49 
50 namespace arm_gemm {
51 
52 namespace {
53 
54 // We need to invoke the kernel differently for quantizing and non-quantizing cases, so here is a shim class to do
55 // that.
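 // (Below: the generic template declares the interface; the Nothing specialization simply forwards to
 // strat.kernel(), while the Requantize32 specializations either pass the quantization parameters straight
 // through or, for SeparateQuantize, run the GEMM into a temporary buffer and requantize it afterwards.)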
56 
57 template<typename OutputStage, bool SeparateQuantize = false>
58 class run_hybrid_kernel {
59 public:
60  template<typename strategy, typename Tlo, typename Tro, typename Tr>
61  static inline void run (
62 #ifdef CYCLE_PROFILING
63  profiler &prof,
64 #endif
65  const strategy &strat, unsigned int num_strings, const unsigned int *string_ptr, IndirectInputArg<Tlo> A_arg, unsigned int M, unsigned int N,
66  unsigned int kern_k, const Tro *b_ptr, IndirectOutputArg<Tr> output_arg, const Tr *bias_ptr, Activation act, bool accumulate,
67  const OutputStage &os, const int32_t *col_bias, unsigned int n_0 );
68 };
69 
70 template<>
71 template<typename strategy, typename Tlo, typename Tro, typename Tr>
72 inline void run_hybrid_kernel<Nothing, false>::run(
73 #ifdef CYCLE_PROFILING
74  profiler &prof,
75 #endif
76  const strategy &strat, unsigned int num_strings, const unsigned int *string_ptr, IndirectInputArg<Tlo> A_arg, unsigned int M, unsigned int N,
77  unsigned int kern_k, const Tro *b_ptr, IndirectOutputArg<Tr> output_arg, const Tr *bias_ptr, Activation act, bool accumulate,
78  const Nothing &, const int32_t *, unsigned int) {
79 #ifdef CYCLE_PROFILING
80  auto p = prof.ScopedProfiler(PROFILE_KERNEL, (unsigned long)M * kern_k * roundup(N, strategy::out_width()));
81 #endif
82  UNUSED(kern_k);
83 
84  /* Indirect hybrid kernels read the full width of the bias. So we need to detect the case where we are writing
85  * a partial block and pad the bias for that block. */
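 // Illustrative example (hypothetical values): with out_width()==16 and N==20, N_bulk==16 and
 // N_remainder==4; the bulk call below uses bias_ptr directly for columns 0..15, and the tail call
 // uses a 16-entry stack copy holding the last 4 bias values for columns 16..19.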
86  if (bias_ptr && !accumulate && (N % strategy::out_width() != 0)) {
87  /* Break N into "N_bulk" (a multiple of output width) and "N_remainder" */
88  unsigned int N_remainder = N % strategy::out_width();
89  unsigned int N_bulk = N - N_remainder;
90 
91  /* Output argument to be used for the tail */
92  IndirectOutputArg<Tr> offset_output = output_arg;
93 
94  /* If there is a "bulk" to be processed, handle that and update "offset_output" appropriately. */
95  if (N_bulk > 0) {
96  strat.kernel(num_strings, string_ptr, A_arg, M, N_bulk, b_ptr, output_arg, bias_ptr, act, accumulate);
97 
98  if (output_arg.is_indirect) {
99  offset_output = IndirectOutputArg<Tr>(output_arg.indirect.ptr, output_arg.indirect.offset + N_bulk);
100  } else {
101  offset_output = IndirectOutputArg<Tr>(output_arg.direct.base + N_bulk, output_arg.direct.stride);
102  }
103  }
104 
105  /* Pad the bias buffer for the remainder */
106  Tr *bias_pad_buffer = reinterpret_cast<Tr *>(alloca(strategy::out_width() * sizeof(Tr)));
107  memcpy(bias_pad_buffer, bias_ptr + N_bulk, N_remainder * sizeof(Tr));
108 
109  /* Process the remainder, offsetting the B pointer as needed. */
110  strat.kernel(num_strings, string_ptr, A_arg, M, N_remainder, b_ptr + (N_bulk * kern_k), offset_output, bias_pad_buffer, act, accumulate);
111  } else {
112  strat.kernel(num_strings, string_ptr, A_arg, M, N, b_ptr, output_arg, bias_ptr, act, accumulate);
113  }
114 }
115 
116 template<>
117 template<typename strategy, typename Tlo, typename Tro, typename Tr>
118 inline void run_hybrid_kernel<Requantize32, false>::run(
119 #ifdef CYCLE_PROFILING
120  profiler &prof,
121 #endif
122  const strategy &strat, unsigned int num_strings, const unsigned int *string_ptr, IndirectInputArg<Tlo> A_arg, unsigned int M, unsigned int N,
123  unsigned int kern_k, const Tro *b_ptr, IndirectOutputArg<Tr> output_arg, const Tr *, Activation, bool,
124  const Requantize32 &os, const int32_t *col_bias, unsigned int n_0 ) {
125 #ifdef CYCLE_PROFILING
126  auto p = prof.ScopedProfiler(PROFILE_KERNEL, (unsigned long)M * kern_k * roundup(N, strategy::out_width()));
127 #endif
128  UNUSED(kern_k);
129 
130  strat.kernel(num_strings, string_ptr, A_arg, M, N, b_ptr, output_arg, &os, col_bias + n_0, n_0);
131 }
132 
133 template<>
134 template<typename strategy, typename Tlo, typename Tro, typename Tr>
135 inline void run_hybrid_kernel<Requantize32, true>::run(
136 #ifdef CYCLE_PROFILING
137  profiler &prof,
138 #endif
139  const strategy &strat, unsigned int num_strings, const unsigned int *string_ptr, IndirectInputArg<Tlo> A_arg, unsigned int M, unsigned int N,
140  unsigned int kern_k, const Tro *b_ptr, IndirectOutputArg<Tr> output_arg, const Tr *, Activation, bool,
141  const Requantize32 &os, const int32_t *col_bias, unsigned int n_0 ) {
142  UNUSED(kern_k);
143  // On this route we will only process one kernel height at a time and will make sure this happens in the driver loop.
144  assert(M <= strategy::out_height());
145  // We don't yet support indirect output (as the quantizer can't do it).
146  assert(output_arg.is_indirect == false);
147 
148  // We need a row sum buffer and intermediate output buffer.
149  // These go on the stack as they are not too large, using an automatic array and alloca() respectively.
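 // Illustrative sizes (hypothetical values): with out_height()==8, out_width()==12 and N==20, row_sums
 // holds 8 int32_t values and result_buffer holds roundup(20,12) * 8 == 192 intermediate results.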
150  int32_t row_sums[strategy::out_height()];
151  typename strategy::result_type *result_buffer;
152 
153  unsigned int output_width = roundup(N, strategy::out_width());
154 
155  result_buffer = reinterpret_cast<typename strategy::result_type *>(alloca(output_width * strategy::out_height() * sizeof(typename strategy::result_type)));
156 
157  {
158 #ifdef CYCLE_PROFILING
159  auto p = prof.ScopedProfiler(PROFILE_KERNEL, (unsigned long)M * kern_k * roundup(N, strategy::out_width()));
160 #endif
161  // Perform the GEMM, into the output buffer.
162  strat.kernel(num_strings, string_ptr, A_arg, M, N, b_ptr, IndirectOutputArg<typename strategy::result_type>(result_buffer, output_width), nullptr, Activation(), false);
163  }
164 
165  if (os.b_offset != 0) {
166 #ifdef CYCLE_PROFILING
167  auto p = prof.ScopedProfiler(PROFILE_ROWSUMS, (unsigned long)M * kern_k);
168 #endif
169  row_sums_indirect(num_strings, string_ptr, A_arg, M, row_sums, &os);
170  } else {
171  memset(row_sums, 0, sizeof(int32_t) * strategy::out_height());
172  }
173 
174  {
175 #ifdef CYCLE_PROFILING
176  auto p = prof.ScopedProfiler(PROFILE_QUANTIZE, (unsigned long)M * N);
177 #endif
178  // Quantize
179  requantize_block_32(os, N, M, result_buffer, output_width, output_arg.direct.base, output_arg.direct.stride, row_sums, col_bias + n_0, n_0);
180  }
181 }
182 
183 } // anonymous namespace
184 
185 // Implementation of the GemmCommon abstract class.
186 template<typename strategy, typename To, typename Tr, typename OutputStage = Nothing, bool SeparateQuantize = false>
187 class GemmHybridIndirect : public GemmCommon<To, Tr> {
188  typedef typename strategy::lhs_operand_type Tloi;
189  typedef typename strategy::rhs_operand_type Troi;
190  typedef typename strategy::result_type Tri;
191 
192  GemmArgs _args;
193  OutputStage _os = {};
194 
195  /* Quantized support (in addition to 'output stage' above) */
196  int32_t *_col_bias = nullptr;
197 
198  const unsigned int _Ktotal;
199  const unsigned int _rounded_Ksize;
200 
201  /* Blocking info */
202  const unsigned int _k_block;
203  const unsigned int _n_block;
204  const unsigned int _Mround;
205 
206  /* Pretransposed buffer. */
207  const Troi *_B_transposed=nullptr;
208 
209  /* Indirect parameters. _indirect_buf doubles as a flag to indicate that "indirect" transform should be used. */
210  const To * const * const * _indirect_buf = nullptr;
211 
212  /* Convolver - only set up for convolution problems, so also doubles as a flag. */
213  std::unique_ptr<convolver<To>> _convolver = nullptr;
214 
215  // Array of pointers to output rows
216 // Tr * const * _output_ptrs;
217 
218  const NDRange<4> _window_range;
219 
220  unsigned int get_col_sum_size() const {
221  if (std::is_same<OutputStage, Requantize32>::value) {
222  return _args._Nsize * _args._nmulti * sizeof(int32_t);
223  } else {
224  return 0;
225  }
226  }
227 
228  static unsigned int get_ktotal(const GemmArgs &args) {
229  return args._Ksections * roundup(args._Ksize, strategy::k_unroll());
230  }
231 
232  static unsigned int compute_k_block(const GemmArgs &args) {
233  // Some kernels don't support accumulate mode - these can't do K blocking at all.
234  if (!strategy::supports_accumulate() || std::is_same<OutputStage, Requantize32>::value) {
235  return get_ktotal(args);
236  }
237 
238  if (args._cfg && args._cfg->inner_block_size) {
239  return roundup(args._cfg->inner_block_size, strategy::k_unroll());
240  }
241 
242  // Experimental data suggests an optimal block size of 512 for FP32 (scaling accordingly for other
243  // datatypes); but don't divide into blocks until we hit 1.5X this size.
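 // Illustrative arithmetic: for FP32 (sizeof(To)==4), target_block_size is 512, so blocking only kicks in
 // once ktotal exceeds 768; e.g. ktotal==1280 gives target_blocks==3 and block_size==427, which is then
 // rounded up to a multiple of k_unroll().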
244  unsigned int target_block_size = 2048 / sizeof(To);
245  auto ktotal = get_ktotal(args);
246 
247  if (ktotal > ((target_block_size*3)/2)) {
248  unsigned int target_blocks = iceildiv(ktotal, target_block_size);
249 
250  unsigned int block_size = iceildiv(ktotal, target_blocks);
251 
252  block_size = roundup(block_size, strategy::k_unroll());
253 
254  return block_size;
255  }
256 
257  return ktotal;
258  }
259 
260  // New N blocking strategy: if it's narrow, or much taller than it is wide, do the full width. Otherwise do a
261  // single block.
262  static unsigned int compute_n_block(const GemmArgs &args, const OutputStage os = {}) {
263  if (args._cfg && args._cfg->outer_block_size) {
264  return args._cfg->outer_block_size;
265  }
266 
267  if (args._Nsize <= 64) {
268  return args._Nsize;
269  }
270 
271  if ((args._Msize / args._Nsize) > 155) {
272  return args._Nsize;
273  }
274 
275  // "Asymmetric" quantizing GEMMs require a different approach - the tall skinny blocks we would otherwise
276  // use imply a great deal of repeated work performing the row sums. If row sums are involved, work out how
277  // much "column" parallelism is going to be required and set the block size accordingly.
278  if (std::is_same<OutputStage, Requantize32>::value) {
279  const Requantize32 *qp = reinterpret_cast<const Requantize32 *>(&os);
280 
281  // Row sums only needed if b_offset isn't 0
282  if (qp->b_offset != 0) {
283  // We can already parallelize across batches, multis and rows (in units of 'out_height')
284  int multi_row_parallelism = args._nmulti * args._nbatches * iceildiv(args._Msize, strategy::out_height());
285 
286  // If this isn't enough, we will need to split up the columns too.
287  if (multi_row_parallelism < args._maxthreads) {
288  unsigned int columns_needed = iceildiv(args._maxthreads, multi_row_parallelism);
289 
290  unsigned int n_block = iceildiv(args._Nsize, columns_needed);
291 
292  return roundup(n_block, strategy::out_width());
293  }
294 
295  // Multi/Batch/Row parallelism is enough - don't split up the columns.
296  return args._Nsize;
297  }
298  }
299 
300  if (args._Ksize <= 128 && args._maxthreads <= 16) {
301  return strategy::out_width() * 3;
302  }
303 
304  return strategy::out_width();
305  }
306 
307 public:
308  GemmHybridIndirect(GemmHybridIndirect &) = delete;
309  GemmHybridIndirect & operator= (GemmHybridIndirect &) = delete;
310 
311  /* Constructor */
312  GemmHybridIndirect(const GemmArgs &args, const OutputStage &os)
313  : _args(args), _os(os), _Ktotal(get_ktotal(args)),
314  _rounded_Ksize(roundup(args._Ksize, strategy::k_unroll())),
315  _k_block(compute_k_block(args)), _n_block(compute_n_block(args, os)),
316  _Mround(roundup(args._Msize, strategy::out_height())),
317  _window_range(iceildiv(args._Msize, strategy::out_height()), args._nbatches,
318  iceildiv(args._Nsize, _n_block), args._nmulti)
319  {
320  // We take a copy of the arguments (not a pointer or reference), but there is no lifetime requirement on the
321  // GemmConfig. Clear out the pointer to avoid accidents.
322  _args._cfg = nullptr;
323  }
324 
325  /* Constructor without OutputStage */
326  GemmHybridIndirect(const GemmArgs &args)
327  : _args(args), _Ktotal(get_ktotal(args)),
328  _rounded_Ksize(roundup(args._Ksize, strategy::k_unroll())),
329  _k_block(compute_k_block(args)), _n_block(compute_n_block(args)),
330  _Mround(roundup(args._Msize, strategy::out_height())),
331  _window_range(iceildiv(args._Msize, strategy::out_height()), args._nbatches,
332  iceildiv(args._Nsize, _n_block), args._nmulti)
333  {
334  // We take a copy of the arguments (not a pointer or reference), but there is no lifetime requirement on the
335  // GemmConfig. Clear out the pointer to avoid accidents.
336  _args._cfg = nullptr;
337  }
338 
339  // Interface implementation - Compulsory functions
340  ndrange_t get_window_size() const override {
341  return { _window_range.total_size() };
342  }
343 
344  // This kernel can always be dynamically scheduled.
345  bool supports_dynamic_scheduling() const override {
346  return true;
347  }
348 
349  // Execute
350  void execute(const ndcoord_t &work_range, const ndcoord_t &, int) override {
351 #ifdef CYCLE_PROFILING
352  profiler prof;
353 #endif
354  strategy strat(_args._ci);
355 
356  std::vector<const To *> in_row_ptrs;
357  std::vector<const To * const *> in_row_strings;
358  std::vector<unsigned int> string_lengths;
359 
360  // In convolution mode, we need input pointers.
361  if (_convolver) {
362  in_row_ptrs = std::vector<const To *>(strategy::out_height() * _args._Ksections, nullptr);
363  in_row_strings = std::vector<const To * const *>(_args._Ksections, nullptr);
364 
365  for (unsigned int i=0; i<_args._Ksections; i++) {
366  in_row_strings[i] = &(in_row_ptrs[i * strategy::out_height()]);
367  }
368  }
369 
370  // In any indirect mode, we need the string lengths.
371  if (_args._indirect_input) {
372  string_lengths = std::vector<unsigned int>(_args._Ksections, 0);
373  }
374 
375  /* Make sure we've been set up correctly. */
376  assert(_B_transposed);
377  static_assert(std::is_same<To, Tloi>::value, "gemm_native: Operand types must be the same.");
378 // static_assert(std::is_same<Tr, Tri>::value, "gemm_native: Result types must be the same.");
379 
380  /* For now, each work item implies all the K for a given output
381  * pixel (so we don't need to synchronize access to the output
382  * array). So separate the loop over K blocks here. */
383  for (unsigned int k0=0; k0<_Ktotal; k0+=_k_block) {
384  unsigned int kmax = std::min(k0 + _k_block, _Ktotal);
385  unsigned int kern_k = roundup(kmax-k0, strategy::k_unroll());
386 
387  const bool first_pass = (k0 == 0);
388  const bool last_pass = (kmax == _Ktotal);
389 
390  unsigned int first_section = (k0 / _rounded_Ksize);
391  unsigned int first_offset = (k0 % _rounded_Ksize);
392  unsigned int kleft = kern_k;
393  unsigned int sections=0;
394  unsigned int offset = first_offset;
395 
396  if (_args._indirect_input) {
397  while (kleft) {
398  // When chopping into sections: the amount that goes into 'string_lengths' is the amount to be
399  // processed (excluding padding). But the amount we subtract from 'kleft' takes account of any
400  // padding applied.
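 // Illustrative example: with _Ksize==30 and k_unroll()==4, _rounded_Ksize==32; starting at offset 0,
 // the first section records a string length of 30 but consumes 32 of 'kleft', accounting for the
 // 2 elements of padding at the end of that section.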
401  string_lengths[sections] = std::min(kleft, _args._Ksize - offset);
402  kleft -= std::min(kleft, _rounded_Ksize - offset);
403  sections++;
404  offset=0;
405  }
406  }
407 
408  auto p = _window_range.iterator(work_range.get_position(0), work_range.get_position_end(0));
409 
410  if (p.done()) {
411  return;
412  }
413 
414  // Process rows either 'out_height' rows at a time, or do all valid rows at once with a single kernel call.
415  // The separate quantizer path only handles one block of rows at a time (as it has to store sums and intermediate results).
416  // The convolution path only generates the pointers for one block of rows at a time.
417  const bool process_all_rows = (!SeparateQuantize && !_convolver);
418 
419  do {
420  const unsigned int m_start = p.dim(0) * strategy::out_height();
421  const unsigned int m_end = process_all_rows ? std::min(p.dim0_max() * strategy::out_height(), _args._Msize) : std::min(m_start + strategy::out_height(), _args._Msize);
422 // const unsigned int m_end = std::min(m_start + strategy::out_height(), _args._Msize);
423  const unsigned int batch = p.dim(1);
424  const unsigned int n0 = p.dim(2) * _n_block;
425  const unsigned int nmax = std::min(n0 + _n_block, _args._Nsize);
426  const unsigned int multi = p.dim(3);
427 
428  const Troi *b_panel = _B_transposed +
429  (multi * roundup(_args._Nsize, strategy::out_width()) * _Ktotal) +
430  (k0 * roundup(_args._Nsize, strategy::out_width())) +
431  (n0 * kern_k);
432 
433  IndirectOutputArg<Tr> out_arg(this->_Cptr + (multi * this->_C_multi_stride) + (batch * this->_C_batch_stride) + (m_start * this->_ldc) + n0, this->_ldc);
434 
435 #ifdef CYCLE_PROFILING
436  auto p = prof.ScopedProfiler(PROFILE_KERNEL, (unsigned long)(m_end - m_start) * kern_k * roundup(nmax-n0, strategy::out_width()));
437 #endif
438  if (_indirect_buf) {
439  run_hybrid_kernel<OutputStage, SeparateQuantize>::run(
440 #ifdef CYCLE_PROFILING
441  prof,
442 #endif
443  strat, sections, string_lengths.data(),
444  IndirectInputArg<To>(_indirect_buf + (multi * _args._nbatches * _args._Ksections) + (batch * _args._Ksections) + first_section, m_start, first_offset),
445  (m_end - m_start), (nmax - n0), kern_k, b_panel, out_arg,
446  (this->_bias && first_pass) ? this->_bias + (multi * this->_bias_multi_stride) + n0 : nullptr,
447  last_pass ? _args._act : Activation(),
448  !first_pass,
449  // Quantization parameters
450  _os, _col_bias+(multi * _args._Nsize), n0);
451  } else if (_convolver) {
452  auto conv_cols = _convolver->process_columns(this->_Aptr + (multi * this->_A_multi_stride) + (batch * this->_A_batch_stride), this->_lda, k0, kmax, _rounded_Ksize);
453 
454  unsigned int pos=0;
455  auto conv_rows = conv_cols.process_rows(m_start, m_end - m_start);
456 
457  while (!conv_rows.finished()) {
458  unsigned int width, conv_offset;
459 
460  assert(pos < sections);
461 
462  std::tie(width, conv_offset) = conv_rows.next_block(&(in_row_ptrs[pos * strategy::out_height()]));
463 
464  if (pos==0) {
465  assert(conv_offset == first_offset);
466  }
467  assert(width == string_lengths[pos]);
468  pos++;
469  }
470  assert(pos == sections);
471 
471 
472  run_hybrid_kernel<OutputStage, SeparateQuantize>::run(
473 #ifdef CYCLE_PROFILING
474  prof,
475 #endif
476  strat, sections, string_lengths.data(),
477  IndirectInputArg<To>(in_row_strings.data(), 0, first_offset),
478  (m_end - m_start), (nmax - n0), kern_k, b_panel, out_arg,
479  (this->_bias && first_pass) ? this->_bias + (multi * this->_bias_multi_stride) + n0 : nullptr,
480  last_pass ? _args._act : Activation(),
481  !first_pass,
482  // Quantization parameters
483  _os, _col_bias+(multi * _args._Nsize), n0);
484  } else {
485  // Length to process. This needs to exclude padding, but 'kmax' potentially includes it.
486  const unsigned int len = (std::min(_args._Ksize, kmax) - k0);
487 
488  run_hybrid_kernel<OutputStage, SeparateQuantize>::run(
489 #ifdef CYCLE_PROFILING
490  prof,
491 #endif
492  strat, 1, &len,
493  IndirectInputArg<To>(this->_Aptr + (multi * this->_A_multi_stride) + (batch * this->_A_batch_stride) + m_start * this->_lda + k0, this->_lda),
494  (m_end - m_start), (nmax - n0), kern_k, b_panel, out_arg,
495  (this->_bias && first_pass) ? this->_bias + (multi * this->_bias_multi_stride) + n0 : nullptr,
496  last_pass ? _args._act : Activation(),
497  !first_pass,
498  // Quantization parameters
499  _os, _col_bias+(multi * _args._Nsize), n0);
500  }
501  } while (process_all_rows ? p.next_dim1() : p.next_dim0());
502  }
503  }
504 
505  // Interface implementation - pretransposed
506  bool B_is_pretransposed() const override {
507  return true;
508  }
509 
510  bool B_pretranspose_required() const override {
511  return (_B_transposed==nullptr);
512  }
513 
514  size_t get_B_pretransposed_array_size() const override {
515  // Start with actual pretransposed buffer...
516  size_t size = roundup(_args._Nsize, strategy::out_width()) * _Ktotal * _args._nmulti * sizeof(Troi);
517 
518  // Space for result row pointers (not strictly needed any more but retained for indirect output testing)
519  size += _args._Msize * _args._nbatches * _args._nmulti * sizeof(const Tr *);
520 
521  if (std::is_same<OutputStage, Requantize32>::value) {
522  size += get_col_sum_size();
523  }
524 
525  return size;
526  }
527 
528  void requantize_bias(void *in_buffer, const To *B, const int ldb, const int B_multi_stride) override {
529  if (std::is_same<OutputStage, Requantize32>::value) {
530  _col_bias = reinterpret_cast<int32_t *>(in_buffer);
531 
532  Requantize32 *qp_ptr = reinterpret_cast<Requantize32 *>(&_os);
533 
534  for (unsigned int i=0; i<_args._nmulti; i++) {
535  // The input is assumed not to have any padding between sections, so straightforward Ksize * Ksections computation gets the total size.
536  compute_col_sums(*qp_ptr, _args._Nsize, _args._Ksize * _args._Ksections, B + (i * B_multi_stride), ldb, _col_bias + (i * _args._Nsize), _args._Ksize * _args._Ksections, i, 0);
537  }
538  }
539  }
540 
541  void pretranspose_B_array(void *in_buffer, const To *B, const int ldb, const int B_multi_stride) override {
542  requantize_bias(in_buffer, B, ldb, B_multi_stride);
543 
544  // Put the transposed data after the column sums - in non-transposing cases get_col_sum_size() == 0
545  uintptr_t buffer_int = reinterpret_cast<uintptr_t>(in_buffer);
546  Troi *buffer = reinterpret_cast<Troi *>(buffer_int + get_col_sum_size());
547  _B_transposed = buffer;
548 
549  strategy strat(_args._ci);
550 
551  for (unsigned int multi=0; multi<_args._nmulti; multi++) {
552  for (unsigned int k0=0; k0<_Ktotal; k0+=_k_block) {
553  const unsigned int kmax=std::min(k0 + _k_block, _Ktotal);
554 
555  /* Figure out the size of each block. */
556  unsigned int k_size = kmax - k0;
557 
558  if (_args._Ksections > 1) {
559  // We need to insert padding at the end of each K section.
560  // The computation needed is a little delicate - the coordinates from the block walker are expressed in
561  // terms of the full, padded, _Ktotal.
562  // But we need to transform each section with reference to the original, unpadded, input, letting the
563  // transform pad each section as needed.
564 
565  // This is needed for computations below.
566  const unsigned int rounded_section_size = roundup(_args._Ksize, strategy::k_unroll());
567 
568  // The expected output format is also an entire <out_width> columns interleaved, then the next set of
569  // columns, and so on. This means, as we are breaking it up vertically, we have to do it one column at
570  // a time.
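 // Illustrative example (hypothetical values): with _Ksize==30 and k_unroll()==4, rounded_section_size==32;
 // a block starting at kpos==40 falls in section 1 with k_offset==8, so PrepareB reads the unpadded input
 // from row (1*30)+8 == 38, and 'buffer'/'kpos' then advance by the k_unroll()-rounded length.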
571  for (unsigned int x0=0; x0 < _args._Nsize; x0 += strategy::out_width() ){
572  unsigned int xmax = std::min(x0 + strategy::out_width(), _args._Nsize);
573 
574  // Track where we are and how much work is left.
575  unsigned int kpos = k0;
576  unsigned int kleft = k_size;
577 
578  while (kleft) {
579  // Which section are we in? Based on the rounded-up section size.
580  unsigned int k_section_base = kpos / rounded_section_size;
581  // How far into the section are we?
582  unsigned int k_offset = kpos - (k_section_base * rounded_section_size);
583 
584  // We will either copy the rest of this section, or to the end of the requested length.
585  unsigned int k_length = std::min(_args._Ksize - k_offset, kleft);
586 
587  strat.transforms.PrepareB(buffer, B + (multi * B_multi_stride), ldb,
588  x0, xmax,
589  (k_section_base * _args._Ksize) + k_offset, // K starting point - compute row to read based on our section and the true section length.
590  (k_section_base * _args._Ksize) + k_offset + k_length); // K end point - starting point plus length computed above.
591 
592  // We need to modify our position based on the ROUNDED version of what we just did.
593  unsigned int padded_length = roundup(k_length, strategy::k_unroll());
594 
595  buffer += strategy::out_width() * padded_length;
596 
597  kpos += padded_length;
598  kleft -= padded_length;
599  }
600  }
601  } else {
602  // In the single K section case, can process the whole lot in one go.
603  // Caution: 'blockwalker::kmax()' rounds up, so clamp to valid _Ksize.
604  strat.transforms.PrepareB(buffer, B + (multi * B_multi_stride), ldb,
605  0, _args._Nsize, k0, std::min(kmax, _args._Ksize));
606  buffer += roundup(_args._Nsize, strategy::out_width()) * roundup(kmax-k0, strategy::k_unroll());
607  }
608  }
609  }
610  }
611 
612  void set_pretransposed_B_data(void *in_buffer) override {
613  // Put the transposed data after the column sums - in non-transposing cases get_col_sum_size() == 0
614  uintptr_t buffer_int = reinterpret_cast<uintptr_t>(in_buffer);
615  _B_transposed = reinterpret_cast<Troi *>(buffer_int + get_col_sum_size());
616  _col_bias = reinterpret_cast<int32_t *>(in_buffer);
617  }
618 
619  // Estimate cycles for given problem given provided parameters.
620  // "perf_type" is a type to pass along to get_performance_parameters to get the right set of performance
621  // parameters - it's arbitrary but usually either the input or output type.
622  template <typename perf_type>
623  static uint64_t estimate_cycles(const GemmArgs &args, const OutputStage &os = {}) {
624  const PerformanceParameters params = strategy::template get_performance_parameters<perf_type>(args._ci);
625 
626  // Note: Current hybrid kernels don't actually round up height (they
627  // have paths for each possible height). Might need to make this
628  // configurable in future.
629  uint64_t total_macs = static_cast<uint64_t>(args._nbatches) * args._nmulti * args._Msize * roundup(args._Nsize, strategy::out_width()) * get_ktotal(args);
630 
631  float mac_cycles = static_cast<float>(total_macs) / params.kernel_macs_cycle;
632 
633  // TODO: A bit of a kludge here: current hybrid kernels incur extra
634  // overhead where the width is not a multiple of kernel width. It's
635  // most noticeable where the overall width is quite low, so add 15%
636  // penalty for such widths.
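 // For example, with out_width()==16 the penalty applies for N==10 or N==20, but not for N==16 or N==40.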
637  if ((args._Nsize < strategy::out_width()) || (args._Nsize > strategy::out_width() && args._Nsize < 2*strategy::out_width())) {
638  mac_cycles *= 1.15f;
639  }
640 
641  uint64_t total_cycles = mac_cycles;
642 
643  // Quantizing kernels with separate quantize need to add in the extra stages.
644  if (std::is_same<OutputStage, Requantize32>::value && SeparateQuantize) {
645  const Requantize32 *qp = reinterpret_cast<const Requantize32 *>(&os);
646 
647  // Row sums: need to consider each value in A (batch * multi * M * K)...
648  uint64_t rowsum_bytes = static_cast<uint64_t>(args._nbatches) * args._nmulti * args._Msize * get_ktotal(args);
649 
650  // ... but row sums are skipped if B offset==0.
651  if (qp->b_offset == 0) {
652  rowsum_bytes = 0;
653  }
654 
655  // Use "prepare bytes per cycle" to store "row sum values per cycle".
656  float rowsum_cycles = static_cast<float>(rowsum_bytes) / params.prepare_bytes_cycle;
657 
658  // Requantize: need to consider each value in C (batch * multi * M * N)
659  uint64_t requantize_bytes = static_cast<uint64_t>(args._nbatches) * args._nmulti * args._Msize * args._Nsize;
660 
661  // Use "merge bytes per cycle" to store "requantize values per cycle".
662  float requantize_cycles = static_cast<float>(requantize_bytes) / params.merge_bytes_cycle;
663 
664  // Recalculate total_cycles with the extra components.
665  total_cycles = mac_cycles + rowsum_cycles + requantize_cycles;
666  }
667 
668  return total_cycles;
669  }
670 
671  void set_quantized_bias(const int32_t *bias, size_t bias_multi_stride) override {
672  if (std::is_same<OutputStage, Requantize32>::value) {
673  Requantize32 *qp = reinterpret_cast<Requantize32 *>(&_os);
674 
675  qp->bias = bias;
676  qp->bias_multi_stride = bias_multi_stride;
677  }
678  }
679 
680  void set_indirect_parameters(size_t string_len, const To * const * const *ptr) override {
681  assert(string_len == _args._Ksize);
682  _indirect_buf = ptr;
683  }
684 
685  void set_convolution_parameters(ConvolutionParameters parms) override {
686  assert(parms.input_channels == _args._Ksize);
687  _convolver = std::unique_ptr<convolver<To>>(new convolver<To>(parms));
688  }
689 
690  GemmConfig get_config() override {
691  GemmConfig c;
692 
693  c.method = GemmMethod::GEMM_HYBRID;
694  c.inner_block_size = _k_block;
695  c.outer_block_size = _n_block;
696  c.filter = get_type_name<strategy>();
697 
698  return c;
699  }
700 };
701 
702 } // namespace arm_gemm
703 
704 #ifdef __I_DEFINED_UNUSED
705 #undef UNUSED
706 #endif