Compute Library
 23.05
gemm_hybrid_indirect.hpp
1 /*
2  * Copyright (c) 2017-2023 Arm Limited.
3  *
4  * SPDX-License-Identifier: MIT
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a copy
7  * of this software and associated documentation files (the "Software"), to
8  * deal in the Software without restriction, including without limitation the
9  * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10  * sell copies of the Software, and to permit persons to whom the Software is
11  * furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in all
14  * copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22  * SOFTWARE.
23  */
24 #pragma once
25 
26 #if !defined(_WIN64) && !defined(__OpenBSD__)
27 #include <alloca.h>
28 #endif /* !defined(_WIN64) && !defined(__OpenBSD__) */
29 
30 #include <algorithm>
31 #include <cassert>
32 
33 #include "arm_gemm.hpp"
34 #include "bias_adder.hpp"
35 #include "convolver.hpp"
36 #include "kernel_weight_format.hpp"
37 #include "ndrange.hpp"
39 #include "transform.hpp"
40 #include "utils.hpp"
41 
42 #ifdef CYCLE_PROFILING
43 #include "profiler.hpp"
44 #endif
45 
46 #ifndef UNUSED
47 #define __I_DEFINED_UNUSED
48 #define UNUSED(x) ((void)(x))
49 #endif
50 
51 namespace arm_gemm {
52 
53 namespace {
54 
55 // We need to invoke the kernel differently for quantizing and non-quantizing cases, so here is a shim class to do
56 // that.
57 
58 template<typename OutputStage, bool SeparateQuantize, bool FixedFormat>
59 class run_hybrid_kernel {
60 public:
61  template<typename strategy, typename Tlo, typename Tro, typename Tr>
62  static inline void run (
63 #ifdef CYCLE_PROFILING
64  profiler &prof,
65 #endif
66  const strategy &strat, unsigned int num_strings, const unsigned int *string_ptr, IndirectInputArg<Tlo> A_arg, unsigned int M, unsigned int N,
67  unsigned int kern_k, const Tro *b_ptr, size_t b_stride, IndirectOutputArg<Tr> output_arg, const Tr *bias_ptr, Activation act, bool accumulate,
68  const OutputStage &os, const int32_t *col_bias, unsigned int n_0 );
69 };
70 
71 template<>
72 template<typename strategy, typename Tlo, typename Tro, typename Tr>
73 inline void run_hybrid_kernel<Nothing, false, false>::run(
74 #ifdef CYCLE_PROFILING
75  profiler &prof,
76 #endif
77  const strategy &strat, unsigned int num_strings, const unsigned int *string_ptr, IndirectInputArg<Tlo> A_arg, unsigned int M, unsigned int N,
78  unsigned int kern_k, const Tro *b_ptr, size_t, IndirectOutputArg<Tr> output_arg, const Tr *bias_ptr, Activation act, bool accumulate,
79  const Nothing &, const int32_t *, unsigned int) {
80 #ifdef CYCLE_PROFILING
81  auto p = prof.ScopedProfiler(PROFILE_KERNEL, (unsigned long)M * kern_k * roundup(N, strategy::out_width()));
82 #endif
83  UNUSED(kern_k);
84 
85  /* Indirect hybrid kernels read the full width of the bias. So we need to detect the case where we are writing
86  * a partial block and pad the bias for that block. */
87  if (bias_ptr && !accumulate && (N % strategy::out_width() != 0)) {
88  /* Break N into "N_bulk" (a multiple of output width) and "N_remainder" */
89  unsigned int N_remainder = N % strategy::out_width();
90  unsigned int N_bulk = N - N_remainder;
91 
92  /* Output argument to be used for the tail */
93  IndirectOutputArg<Tr> offset_output = output_arg;
94 
95  /* If there is a "bulk" to be processed, handle that and update "offset_output" appropriately. */
96  if (N_bulk > 0) {
97  strat.kernel(num_strings, string_ptr, A_arg, M, N_bulk, b_ptr, output_arg, bias_ptr, act, accumulate);
98 
99  if (output_arg.is_indirect) {
100  offset_output = IndirectOutputArg<Tr>(output_arg.indirect.ptr, output_arg.indirect.offset + N_bulk);
101  } else {
102  offset_output = IndirectOutputArg<Tr>(output_arg.direct.base + N_bulk, output_arg.direct.stride);
103  }
104  }
105 
106  /* Pad the bias buffer for the remainder */
107  Tr *bias_pad_buffer = reinterpret_cast<Tr *>(alloca(strategy::out_width() * sizeof(Tr)));
108  memcpy(bias_pad_buffer, bias_ptr + N_bulk, N_remainder * sizeof(Tr));
109 
110  /* Process the remainder, offsetting the B pointer as needed. */
111  strat.kernel(num_strings, string_ptr, A_arg, M, N_remainder, b_ptr + (N_bulk * kern_k), offset_output, bias_pad_buffer, act, accumulate);
112  } else {
113  strat.kernel(num_strings, string_ptr, A_arg, M, N, b_ptr, output_arg, bias_ptr, act, accumulate);
114  }
115 }
116 
117 template<>
118 template<typename strategy, typename Tlo, typename Tro, typename Tr>
119 inline void run_hybrid_kernel<Nothing, false, true>::run(
120 #ifdef CYCLE_PROFILING
121  profiler &prof,
122 #endif
123  const strategy &strat, unsigned int num_strings, const unsigned int *string_ptr, IndirectInputArg<Tlo> A_arg, unsigned int M, unsigned int N,
124  unsigned int kern_k, const Tro *b_ptr, size_t b_stride, IndirectOutputArg<Tr> output_arg, const Tr *bias_ptr, Activation act, bool accumulate,
125  const Nothing &, const int32_t *, unsigned int) {
126 #ifdef CYCLE_PROFILING
127  auto p = prof.ScopedProfiler(PROFILE_KERNEL, (unsigned long)M * kern_k * roundup(N, strategy::out_width()));
128 #endif
129  UNUSED(kern_k);
130 
131  /* Indirect hybrid kernels read the full width of the bias. So we need to detect the case where we are writing
132  * a partial block and pad the bias for that block. */
133  if (bias_ptr && !accumulate && (N % strategy::out_width() != 0)) {
134  /* Break N into "N_bulk" (a multiple of output width) and "N_remainder" */
135  unsigned int N_remainder = N % strategy::out_width();
136  unsigned int N_bulk = N - N_remainder;
137 
138  /* Output argument to be used for the tail */
139  IndirectOutputArg<Tr> offset_output = output_arg;
140 
141  /* If there is a "bulk" to be processed, handle that and update "offset_output" appropriately. */
142  if (N_bulk > 0) {
143  strat.kernel(num_strings, string_ptr, A_arg, M, N_bulk, b_ptr, b_stride, output_arg, bias_ptr, act, accumulate);
144 
145  if (output_arg.is_indirect) {
146  offset_output = IndirectOutputArg<Tr>(output_arg.indirect.ptr, output_arg.indirect.offset + N_bulk);
147  } else {
148  offset_output = IndirectOutputArg<Tr>(output_arg.direct.base + N_bulk, output_arg.direct.stride);
149  }
150  }
151 
152  /* Pad the bias buffer for the remainder */
153  Tr *bias_pad_buffer = reinterpret_cast<Tr *>(alloca(strategy::out_width() * sizeof(Tr)));
154  memcpy(bias_pad_buffer, bias_ptr + N_bulk, N_remainder * sizeof(Tr));
155 
156  /* Process the remainder, offsetting the B pointer as needed. */
157  strat.kernel(num_strings, string_ptr, A_arg, M, N_remainder,
158  b_ptr + (N_bulk / strategy::stripe_width()) * b_stride, b_stride, offset_output,
159  bias_pad_buffer, act, accumulate);
160  } else {
161  strat.kernel(num_strings, string_ptr, A_arg, M, N, b_ptr, b_stride, output_arg, bias_ptr, act, accumulate);
162  }
163 }
164 
165 template<>
166 template<typename strategy, typename Tlo, typename Tro, typename Tr>
167 inline void run_hybrid_kernel<Requantize32, false, false>::run(
168 #ifdef CYCLE_PROFILING
169  profiler &prof,
170 #endif
171  const strategy &strat, unsigned int num_strings, const unsigned int *string_ptr, IndirectInputArg<Tlo> A_arg, unsigned int M, unsigned int N,
172  unsigned int kern_k, const Tro *b_ptr, size_t, IndirectOutputArg<Tr> output_arg, const Tr *, Activation, bool,
173  const Requantize32 &os, const int32_t *col_bias, unsigned int n_0 ) {
174 #ifdef CYCLE_PROFILING
175  auto p = prof.ScopedProfiler(PROFILE_KERNEL, (unsigned long)M * kern_k * roundup(N, strategy::out_width()));
176 #endif
177  UNUSED(kern_k);
178 
179  strat.kernel(num_strings, string_ptr, A_arg, M, N, b_ptr, output_arg, &os, col_bias + n_0, n_0);
180 }
181 
182 template<>
183 template<typename strategy, typename Tlo, typename Tro, typename Tr>
184 inline void run_hybrid_kernel<Requantize32, true, false>::run(
185 #ifdef CYCLE_PROFILING
186  profiler &prof,
187 #endif
188  const strategy &strat, unsigned int num_strings, const unsigned int *string_ptr, IndirectInputArg<Tlo> A_arg, unsigned int M, unsigned int N,
189  unsigned int kern_k, const Tro *b_ptr, size_t, IndirectOutputArg<Tr> output_arg, const Tr *, Activation, bool,
190  const Requantize32 &os, const int32_t *col_bias, unsigned int n_0 ) {
191  UNUSED(kern_k);
192  // On this route we will only process one kernel height at a time and will make sure this happens in the driver loop.
193  assert(M <= strategy::out_height());
194  // We don't yet support indirect output (as the quantizer can't do it).
195  assert(output_arg.is_indirect == false);
196 
197  // We need a row sum buffer and intermediate output buffer.
198  // These go on the stack as they are not too large, using an automatic array and alloca() respectively.
199  int32_t row_sums[strategy::out_height()];
200  typename strategy::result_type *result_buffer;
201 
202  unsigned int output_width = roundup(N, strategy::out_width());
203 
204  result_buffer = reinterpret_cast<typename strategy::result_type *>(alloca(output_width * strategy::out_height() * sizeof(typename strategy::result_type)));
205 
206  {
207 #ifdef CYCLE_PROFILING
208  auto p = prof.ScopedProfiler(PROFILE_KERNEL, (unsigned long)M * kern_k * roundup(N, strategy::out_width()));
209 #endif
210  // Perform the GEMM, into the output buffer.
211  strat.kernel(num_strings, string_ptr, A_arg, M, N, b_ptr, IndirectOutputArg<typename strategy::result_type>(result_buffer, output_width), nullptr, Activation(), false);
212  }
213 
214  if (os.b_offset != 0) {
215 #ifdef CYCLE_PROFILING
216  auto p = prof.ScopedProfiler(PROFILE_ROWSUMS, (unsigned long)M * kern_k);
217 #endif
218  row_sums_indirect(num_strings, string_ptr, A_arg, M, row_sums, &os);
219  } else {
220  memset(row_sums, 0, sizeof(int32_t) * strategy::out_height());
221  }
222 
223  {
224 #ifdef CYCLE_PROFILING
225  auto p = prof.ScopedProfiler(PROFILE_QUANTIZE, (unsigned long)M * N);
226 #endif
227  // Quantize
228  requantize_block_32(os, N, M, result_buffer, output_width, output_arg.direct.base, output_arg.direct.stride, row_sums, col_bias + n_0, n_0);
229  }
230 }
231 
232 template<typename strategy, bool FixedFormat>
233 struct stripe_width {
234  static unsigned int get() {
235  return strategy::stripe_width();
236  }
237 };
238 
239 template<typename strategy>
240 struct stripe_width<strategy, false> {
241  static unsigned int get() {
242  return 0;
243  }
244 };
245 
246 template<typename strategy, bool FixedFormat>
247 struct kernel_weight_format {
248  static KernelWeightFormat get() {
249  return strategy::kernel_weight_format();
250  }
251 };
252 
253 template<typename strategy>
254 struct kernel_weight_format<strategy, false> {
255  static KernelWeightFormat get() {
256  return KernelWeightFormat::NON_FIXED;
257  }
258 };
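// Dispatch summary: the two Nothing specializations above cover the non-quantized case, without and
// with a fixed-format B stride respectively; the two Requantize32 specializations cover quantization
// fused into the kernel and the separate row-sum + requantize path. The stripe_width and
// kernel_weight_format helpers query the strategy only when FixedFormat is set.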
259 
260 } // anonymous namespace
261 
262 // Implementation of the GemmCommon abstract class.
263 template<typename strategy, typename To, typename Tr, typename OutputStage=Nothing, bool SeparateQuantize=false, bool FixedFormat=false>
264 class GemmHybridIndirect : public GemmCommon<To, Tr> {
265  typedef typename strategy::lhs_operand_type Tloi;
266  typedef typename strategy::rhs_operand_type Troi;
267  typedef typename strategy::result_type Tri;
268 
269  GemmArgs _args;
270  OutputStage _os = {};
271 
272  /* Quantized support (in addition to 'output stage' above) */
273  int32_t *_col_bias = nullptr;
274 
275  const unsigned int _Ktotal;
276  const unsigned int _rounded_Ksize;
277 
278  /* Blocking info */
279  const unsigned int _k_block;
280  const unsigned int _n_block;
281  const unsigned int _Mround;
282 
283  /* Pretransposed buffer. */
284  const Troi *_B_transposed=nullptr;
285 
286  /* Indirect parameters. _indirect_buf doubles as a flag to indicate that "indirect" transform should be used. */
287  const To * const * const * _indirect_buf = nullptr;
288 
289  /* Convolver - only set up for convolution problems, so also doubles as a flag. */
290  std::unique_ptr<convolver<To>> _convolver = nullptr;
291 
292  // Array of pointers to output rows
293 // Tr * const * _output_ptrs;
294 
295  const NDRange<4> _window_range;
296 
297  unsigned int get_col_sum_size() const {
298  if (std::is_same<OutputStage, Requantize32>::value) {
299  return _args._Nsize * _args._nmulti * sizeof(int32_t);
300  } else {
301  return 0;
302  }
303  }
304 
305  static unsigned int get_ktotal(const GemmArgs &args) {
306  return args._Ksections * roundup(args._Ksize, strategy::k_unroll());
307  }
308 
309  static unsigned int compute_k_block(const GemmArgs &args) {
310  // Some kernels don't support accumulate mode - these can't do K blocking at all.
311  if (!strategy::supports_accumulate() || std::is_same<OutputStage, Requantize32>::value) {
312  return get_ktotal(args);
313  }
314 
315  if (args._cfg && args._cfg->inner_block_size) {
316  return roundup(args._cfg->inner_block_size, strategy::k_unroll());
317  }
318 
319  // Experimental data suggests an optimal block size of 512 for FP32 (scaling accordingly for other
320  // datatypes); but don't divide into blocks until we hit 1.5X this size.
321  unsigned int target_block_size = 2048 / sizeof(To);
322  auto ktotal = get_ktotal(args);
323 
324  if (ktotal > ((target_block_size*3)/2)) {
325  unsigned int target_blocks = iceildiv(ktotal, target_block_size);
326 
327  unsigned int block_size = iceildiv(ktotal, target_blocks);
328 
329  block_size = roundup(block_size, strategy::k_unroll());
330 
331  return block_size;
332  }
333 
334  return ktotal;
335  }
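// Illustrative arithmetic for the blocking heuristic above (assumed figures): with To = float,
// target_block_size = 2048 / 4 = 512 values of K, and blocking only kicks in once ktotal exceeds
// 1.5x that (768). For ktotal = 2000 this gives iceildiv(2000, 512) = 4 blocks of
// iceildiv(2000, 4) = 500, which is then rounded up to a multiple of strategy::k_unroll().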
336 
337  // New N blocking strategy: if it's narrow, or much taller than it is wide, do the full width. Otherwise do a
338  // single block.
339  static unsigned int compute_n_block(const GemmArgs &args, const OutputStage os = {}) {
340  if (args._cfg && args._cfg->outer_block_size) {
341  return args._cfg->outer_block_size;
342  }
343 
344  if (args._Nsize <= 64) {
345  return args._Nsize;
346  }
347 
348  if ((args._Msize / args._Nsize) > 155) {
349  return args._Nsize;
350  }
351 
352  // "Asymmetric" quantizing GEMMs require a different approach - the tall skinny blocks we would otherwise
353  // use imply a great deal of repeated work performing the row sums. If row sums are involved, work out how
354  // much "column" parallelism is going to be required and set the block size accordingly.
355  if (std::is_same<OutputStage, Requantize32>::value) {
356  const Requantize32 *qp = reinterpret_cast<const Requantize32 *>(&os);
357 
358  // Row sums only needed if b_offset isn't 0
359  if (qp->b_offset != 0) {
360  // We can already parallelize across batches, multis and rows (in units of 'out_height')
361  int multi_row_parallelism = args._nmulti * args._nbatches * iceildiv(args._Msize, strategy::out_height());
362 
363  // If this isn't enough, we will need to split up the columns too.
364  if (multi_row_parallelism < args._maxthreads) {
365  unsigned int columns_needed = iceildiv(args._maxthreads, multi_row_parallelism);
366 
367  unsigned int n_block = iceildiv(args._Nsize, columns_needed);
368 
369  return roundup(n_block, strategy::out_width());
370  }
371 
372  // Multi/Batch/Row parallelism is enough - don't split up the columns.
373  return args._Nsize;
374  }
375  }
376 
377  if (args._Ksize <= 128 && args._maxthreads <= 16) {
378  return strategy::out_width() * 3;
379  }
380 
381  return strategy::out_width();
382  }
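// Illustrative example for the Requantize32 branch above (assumed figures): with out_height() = 6,
// _Msize = 12, _nbatches = 1 and _nmulti = 1 there are 2 units of row parallelism; if _maxthreads = 8,
// columns_needed = iceildiv(8, 2) = 4 and the block becomes roundup(iceildiv(_Nsize, 4), out_width()),
// so threads split the columns instead of repeating the row sums over tall, narrow blocks.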
383 
384 public:
385  GemmHybridIndirect(GemmHybridIndirect &) = delete;
386  GemmHybridIndirect & operator= (GemmHybridIndirect &) = delete;
387 
388  /* Constructor */
389  GemmHybridIndirect(const GemmArgs &args, const OutputStage &os)
390  : _args(args), _os(os), _Ktotal(get_ktotal(args)),
391  _rounded_Ksize(roundup(args._Ksize, strategy::k_unroll())),
392  _k_block(compute_k_block(args)), _n_block(compute_n_block(args, os)),
393  _Mround(roundup(args._Msize, strategy::out_height())),
394  _window_range(iceildiv(args._Msize, strategy::out_height()), args._nbatches,
395  iceildiv(args._Nsize, _n_block), args._nmulti)
396  {
397  // We take a copy of the arguments (not a pointer or reference), but there is no lifetime requirement on the
398  // GemmConfig. Clear out the pointer to avoid accidents.
399  _args._cfg = nullptr;
400  }
401 
402  /* Constructor without OutputStage */
403  GemmHybridIndirect(const GemmArgs &args)
404  : _args(args), _Ktotal(get_ktotal(args)),
405  _rounded_Ksize(roundup(args._Ksize, strategy::k_unroll())),
406  _k_block(compute_k_block(args)), _n_block(compute_n_block(args)),
407  _Mround(roundup(args._Msize, strategy::out_height())),
408  _window_range(iceildiv(args._Msize, strategy::out_height()), args._nbatches,
409  iceildiv(args._Nsize, _n_block), args._nmulti)
410  {
411  // We take a copy of the arguments (not a pointer or reference), but there is no lifetime requirement on the
412  // GemmConfig. Clear out the pointer to avoid accidents.
413  _args._cfg = nullptr;
414  }
415 
416  // Interface implementation - Compulsory functions
417  ndrange_t get_window_size() const override {
418  return { _window_range.total_size() };
419  }
420 
421  // This kernel can always be dynamically scheduled.
422  bool supports_dynamic_scheduling() const override {
423  return true;
424  }
425 
426  // Execute
427  void execute(const ndcoord_t &work_range, const ndcoord_t &, int) override {
428 #ifdef CYCLE_PROFILING
429  profiler prof;
430 #endif
431  strategy strat(_args._ci);
432 
433  std::vector<const To *> in_row_ptrs;
434  std::vector<const To * const *> in_row_strings;
435  std::vector<unsigned int> string_lengths;
436 
437  // In convolution mode, we need input pointers.
438  if (_convolver) {
439  in_row_ptrs = std::vector<const To *>(strategy::out_height() * _args._Ksections, nullptr);
440  in_row_strings = std::vector<const To * const *>(_args._Ksections, nullptr);
441 
442  for (unsigned int i=0; i<_args._Ksections; i++) {
443  in_row_strings[i] = &(in_row_ptrs[i * strategy::out_height()]);
444  }
445  }
446 
447  // In any indirect mode, we need the string lengths.
448  if (_args._indirect_input) {
449  string_lengths = std::vector<unsigned int>(_args._Ksections, 0);
450  }
451 
452  /* Make sure we've been set up correctly. */
453  assert(FixedFormat || _B_transposed);
454  static_assert(std::is_same<To, Tloi>::value, "gemm_native: Operand types must be the same.");
455 // static_assert(std::is_same<Tr, Tri>::value, "gemm_native: Result types must be the same.");
456 
457  /* For now, each work item implies all the K for a given output
458  * pixel (so we don't need to synchronize access to the output
459  * array). So separate the loop over K blocks here. */
460  for (unsigned int k0=0; k0<_Ktotal; k0+=_k_block) {
461  unsigned int kmax = std::min(k0 + _k_block, _Ktotal);
462  unsigned int kern_k = roundup(kmax-k0, strategy::k_unroll());
463 
464  const bool first_pass = (k0 == 0);
465  const bool last_pass = (kmax == _Ktotal);
466 
467  unsigned int first_section = (k0 / _rounded_Ksize);
468  unsigned int first_offset = (k0 % _rounded_Ksize);
469  unsigned int kleft = kern_k;
470  unsigned int sections=0;
471  unsigned int offset = first_offset;
472 
473  if (_args._indirect_input) {
474  while (kleft) {
475  // When chopping into sections: the amount that goes into 'string_lengths' is the amount to be
476  // processed (excluding padding). But the amount we subtract from 'kleft' takes account of any
477  // padding applied.
478  string_lengths[sections] = std::min(kleft, _args._Ksize - offset);
479  kleft -= std::min(kleft, _rounded_Ksize - offset);
480  sections++;
481  offset=0;
482  }
483  }
484 
485  auto p = _window_range.iterator(work_range.get_position(0), work_range.get_position_end(0));
486 
487  if (p.done()) {
488  return;
489  }
490 
491  // Process rows either 'out_height' rows at a time, or do all valid rows at once with a single kernel call.
492  // The separate quantizer path only handles one block of rows at a time (as it has to store sums and intermediate results).
493  // The convolution path only generates the pointers for one block of rows at a time.
494  const bool process_all_rows = (!SeparateQuantize && !_convolver);
495 
496  do {
497  const unsigned int m_start = p.dim(0) * strategy::out_height();
498  const unsigned int m_end = process_all_rows ? std::min(p.dim0_max() * strategy::out_height(), _args._Msize) : std::min(m_start + strategy::out_height(), _args._Msize);
499 // const unsigned int m_end = std::min(m_start + strategy::out_height(), _args._Msize);
500  const unsigned int batch = p.dim(1);
501  const unsigned int n0 = p.dim(2) * _n_block;
502  const unsigned int nmax = std::min(n0 + _n_block, _args._Nsize);
503  const unsigned int multi = p.dim(3);
504 
505  const Troi *b_panel;
506  if (FixedFormat) {
507  b_panel = reinterpret_cast<const Troi *>(this->_Bptr) +
508  (multi * this->_B_multi_stride) +
509  ((n0 / stripe_width<strategy, FixedFormat>::get()) * this->_ldb) +
510  (k0 * stripe_width<strategy, FixedFormat>::get());
511  } else {
512  b_panel = _B_transposed +
513  (multi * roundup(_args._Nsize, strategy::out_width()) * _Ktotal) +
514  (k0 * roundup(_args._Nsize, strategy::out_width())) +
515  (n0 * kern_k);
516  }
517 
518  IndirectOutputArg<Tr> out_arg(this->_Cptr + (multi * this->_C_multi_stride) + (batch * this->_C_batch_stride) + (m_start * this->_ldc) + n0, this->_ldc);
519 
520 #ifdef CYCLE_PROFILING
521  auto p = prof.ScopedProfiler(PROFILE_KERNEL, (unsigned long)(m_end - m_start) * kern_k * roundup(nmax-n0, strategy::out_width()));
522 #endif
523  if (_indirect_buf) {
524  run_hybrid_kernel<OutputStage, SeparateQuantize, FixedFormat>::run(
525 #ifdef CYCLE_PROFILING
526  prof,
527 #endif
528  strat, sections, string_lengths.data(),
529  IndirectInputArg<To>(_indirect_buf + (multi * _args._nbatches * _args._Ksections) + (batch * _args._Ksections) + first_section, m_start, first_offset),
530  (m_end - m_start), (nmax - n0), kern_k, b_panel, this->_ldb, out_arg,
531  (this->_bias && first_pass) ? this->_bias + (multi * this->_bias_multi_stride) + n0 : nullptr,
532  last_pass ? _args._act : Activation(),
533  !first_pass,
534  // Quantization parameters
535  _os, _col_bias+(multi * _args._Nsize), n0);
536  } else if (_convolver) {
537  auto conv_cols = _convolver->process_columns(this->_Aptr + (multi * this->_A_multi_stride) + (batch * this->_A_batch_stride), this->_lda, k0, kmax, _rounded_Ksize);
538 
539  unsigned int pos=0;
540  auto conv_rows = conv_cols.process_rows(m_start, m_end - m_start);
541 
542  while (!conv_rows.finished()) {
543  unsigned int width, conv_offset;
544 
545  assert(pos < sections);
546 
547  std::tie(width, conv_offset) = conv_rows.next_block(&(in_row_ptrs[pos * strategy::out_height()]));
548 
549  if (pos==0) {
550  assert(conv_offset == first_offset);
551  }
552  assert(width == string_lengths[pos]);
553  pos++;
554  }
555  assert(pos == sections);
556 
558 #ifdef CYCLE_PROFILING
559  prof,
560 #endif
561  strat, sections, string_lengths.data(),
562  IndirectInputArg<To>(in_row_strings.data(), 0, first_offset),
563  (m_end - m_start), (nmax - n0), kern_k, b_panel, this->_ldb, out_arg,
564  (this->_bias && first_pass) ? this->_bias + (multi * this->_bias_multi_stride) + n0 : nullptr,
565  last_pass ? _args._act : Activation(),
566  !first_pass,
567  // Quantization parameters
568  _os, _col_bias+(multi * _args._Nsize), n0);
569  } else {
570  // Length to process. This needs to exclude padding, but 'kmax' potentially includes it.
571  const unsigned int len = (std::min(_args._Ksize, kmax) - k0);
572 
574 #ifdef CYCLE_PROFILING
575  prof,
576 #endif
577  strat, 1, &len,
578  IndirectInputArg<To>(this->_Aptr + (multi * this->_A_multi_stride) + (batch * this->_A_batch_stride) + m_start * this->_lda + k0, this->_lda),
579  (m_end - m_start), (nmax - n0), kern_k, b_panel, this->_ldb, out_arg,
580  (this->_bias && first_pass) ? this->_bias + (multi * this->_bias_multi_stride) + n0 : nullptr,
581  last_pass ? _args._act : Activation(),
582  !first_pass,
583  // Quantization parameters
584  _os, _col_bias+(multi * _args._Nsize), n0);
585  }
586  } while (process_all_rows ? p.next_dim1() : p.next_dim0());
587  }
588  }
589 
590  // Interface implementation - pretransposed
591  bool B_is_pretransposed() const override {
592  return (FixedFormat == false);
593  }
594 
595  bool B_pretranspose_required() const override {
596  return (FixedFormat == false) && (_B_transposed==nullptr);
597  }
598 
599  size_t get_B_pretransposed_array_size() const override {
600  if (FixedFormat) {
601  return 0;
602  }
603 
604  // Start with actual pretransposed buffer...
605  size_t size = roundup(_args._Nsize, strategy::out_width()) * _Ktotal * _args._nmulti * sizeof(Troi);
606 
607  // Space for result row pointers (not strictly needed any more but retained for indirect output testing)
608  size += _args._Msize * _args._nbatches * _args._nmulti * sizeof(const Tr *);
609 
610  if (std::is_same<OutputStage, Requantize32>::value) {
611  size += get_col_sum_size();
612  }
613 
614  return size;
615  }
616 
617  size_t get_B_pretranspose_window_size() const override {
618  return _args._nmulti * iceildiv(_args._Nsize, strategy::out_width());
619  }
620 
621  void requantize_bias(void *in_buffer, const To *B, const int ldb, const int B_multi_stride) override {
622  if (std::is_same<OutputStage, Requantize32>::value) {
623  _col_bias = reinterpret_cast<int32_t *>(in_buffer);
624 
625  Requantize32 *qp_ptr = reinterpret_cast<Requantize32 *>(&_os);
626 
627  for (unsigned int i=0; i<_args._nmulti; i++) {
628  // The input is assumed not to have any padding between sections, so straightforward Ksize * Ksections computation gets the total size.
629  compute_col_sums(*qp_ptr, _args._Nsize, _args._Ksize * _args._Ksections, B + (i * B_multi_stride), ldb, _col_bias + (i * _args._Nsize), _args._Ksize * _args._Ksections, i, 0);
630  }
631  }
632  }
633 
634  void pretranspose_B_array(void *in_buffer, const To *B, const int ldb, const int B_multi_stride) override {
635  pretranspose_B_array_part(in_buffer, B, ldb, B_multi_stride, 0, get_B_pretranspose_window_size());
636  }
637 
638  void pretranspose_B_array_part(void *in_buffer, const To *B, const int ldb, const int B_multi_stride, size_t start, size_t end) override {
639  if (end >= get_B_pretranspose_window_size()) {
640  requantize_bias(in_buffer, B, ldb, B_multi_stride);
641  }
642 
643  // Put the transposed data after the column sums - in non-transposing cases get_col_sum_size() == 0
644  uintptr_t buffer_int = reinterpret_cast<uintptr_t>(in_buffer);
645  Troi *buffer_base = reinterpret_cast<Troi *>(buffer_int + get_col_sum_size());
646  _B_transposed = buffer_base;
647 
648  strategy strat(_args._ci);
649  size_t work_per_multi = iceildiv(_args._Nsize, strategy::out_width());
650 
651  for (unsigned int multi=(start / work_per_multi); multi<_args._nmulti; multi++) {
652  // Work out which part of the window space this multi occupies,
653  // skip to the next multi or exit as needed.
654  size_t wk_start = multi * work_per_multi;
655  size_t wk_end = (multi + 1) * work_per_multi;
656 
657  assert(wk_end > start);
658 
659  if (wk_start >= end) {
660  break;
661  }
662 
663  for (unsigned int k0=0; k0<_Ktotal; k0+=_k_block) {
664  const unsigned int kmax=std::min(k0 + _k_block, _Ktotal);
665 
666  /* Figure out the size of each block. */
667  unsigned int k_size = kmax - k0;
668 
669  // Correct the N range and buffer base if we are not processing the whole block.
670  size_t n_start = 0;
671  size_t n_end = _args._Nsize;
672 
673  // If we are not doing the first columns, update the buffer write position and starting N value.
674  if (start > wk_start) {
675  n_start = (start - wk_start) * strategy::out_width();
676  }
677 
678  // If we are not doing the last items, update the final N value.
679  if (end < wk_end) {
680  n_end = (end - wk_start) * strategy::out_width();
681  }
682 
683  // Set the buffer pointer
684  Troi *buffer = buffer_base +
685  (roundup(_args._Nsize, strategy::out_width()) * (multi * _Ktotal + k0)) +
686  (n_start * roundup(k_size, strategy::k_unroll()));
687 
688  if (_args._Ksections > 1) {
689  // We need to insert padding at the end of each K section.
690  // The computation needed is a little delicate - the k0/kmax coordinates are expressed in
691  // terms of the full, padded, _Ktotal.
692  // But we need to transform each section with reference to the original, unpadded, input, letting the
693  // transform pad each section as needed.
694 
695  // This is needed for computations below.
696  const unsigned int rounded_section_size = roundup(_args._Ksize, strategy::k_unroll());
697 
698  // The expected output format is also an entire <out_width> columns interleaved, then the next set of
699  // columns, and so on. This means, as we are breaking it up vertically, we have to do it one column at
700  // a time.
701  for (unsigned int x0 = n_start; x0 < n_end; x0 += strategy::out_width()) {
702  unsigned int xmax = std::min(x0 + strategy::out_width(), _args._Nsize);
703 
704  // Track where we are and how much work is left.
705  unsigned int kpos = k0;
706  unsigned int kleft = k_size;
707 
708  while (kleft) {
709  // Which section are we in? Based on the rounded-up section size.
710  unsigned int k_section_base = kpos / rounded_section_size;
711  // How far into the section are we?
712  unsigned int k_offset = kpos - (k_section_base * rounded_section_size);
713 
714  // We will either copy the rest of this section, or to the end of the requested length.
715  unsigned int k_length = std::min(_args._Ksize - k_offset, kleft);
716 
717  strat.transforms.PrepareB(buffer, B + (multi * B_multi_stride), ldb,
718  x0, xmax,
719  (k_section_base * _args._Ksize) + k_offset, // K starting point - compute row to read based on our section and the true section length.
720  (k_section_base * _args._Ksize) + k_offset + k_length); // K end point - starting point plus length computed above.
721 
722  // We need to modify our position based on the ROUNDED version of what we just did.
723  unsigned int padded_length = roundup(k_length, strategy::k_unroll());
724 
725  buffer += strategy::out_width() * padded_length;
726 
727  kpos += padded_length;
728  kleft -= padded_length;
729  }
730  }
731  } else {
732  // In the single K section case, can process the whole lot in one go.
733  strat.transforms.PrepareB(buffer, B + (multi * B_multi_stride), ldb,
734  n_start, n_end, k0, std::min(kmax, _args._Ksize));
735  }
736  }
737  }
738  }
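// Illustrative mapping for the K-section handling above (assumed figures): with _Ksize = 10 and
// k_unroll() = 4, rounded_section_size = 12, so a padded coordinate kpos = 12 gives k_section_base = 1
// and k_offset = 0; PrepareB() is then asked for unpadded source rows from (1 * 10) + 0 = 10, i.e. the
// start of the second section, and the transform re-applies the per-section padding.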
739 
740  void set_pretransposed_B_data(void *in_buffer) override {
741  // Put the transposed data after the column sums - in non-transposing cases get_col_sum_size() == 0
742  uintptr_t buffer_int = reinterpret_cast<uintptr_t>(in_buffer);
743  _B_transposed = reinterpret_cast<Troi *>(buffer_int + get_col_sum_size());
744  _col_bias = reinterpret_cast<int32_t *>(in_buffer);
745  }
746 
747  // Estimate cycles for given problem given provided parameters.
748  // "perf_type" is a type to pass along to get_performance_parameters to get the right set of performance
749  // parameters - it's arbitrary but usually either the input or output type.
750  template <typename perf_type>
751  static uint64_t estimate_cycles(const GemmArgs &args, const OutputStage &os = {}) {
752  const PerformanceParameters params = strategy::template get_performance_parameters<perf_type>(args._ci);
753 
754  // Note: Current hybrid kernels don't actually round up height (they
755  // have paths for each possible height). Might need to make this
756  // configurable in future.
757  uint64_t total_macs = static_cast<uint64_t>(args._nbatches) * args._nmulti * args._Msize * roundup(args._Nsize, strategy::out_width()) * get_ktotal(args);
758 
759  float mac_cycles = static_cast<float>(total_macs) / params.kernel_macs_cycle;
760 
761  // TODO: A bit of a kludge here: current hybrid kernels incur extra
762  // overhead where the width is not a multiple of kernel width. It's
763  // most noticeable where the overall width is quite low, so add 15%
764  // penalty for such widths.
765  if ((args._Nsize < strategy::out_width()) || (args._Nsize > strategy::out_width() && args._Nsize < 2*strategy::out_width())) {
766  mac_cycles *= 1.15f;
767  }
768 
769  uint64_t total_cycles = mac_cycles;
770 
771  // Quantizing kernels with separate quantize need to add in the extra stages.
772  if (std::is_same<OutputStage, Requantize32>::value && SeparateQuantize) {
773  const Requantize32 *qp = reinterpret_cast<const Requantize32 *>(&os);
774 
775  // Row sums: need to consider each value in A (batch * multi * M * K)...
776  uint64_t rowsum_bytes = static_cast<uint64_t>(args._nbatches) * args._nmulti * args._Msize * get_ktotal(args);
777 
778  // ... but row sums are skipped if B offset==0.
779  if (qp->b_offset == 0) {
780  rowsum_bytes = 0;
781  }
782 
783  // Use "prepare bytes per cycle" to store "row sum values per cycle".
784  float rowsum_cycles = static_cast<float>(rowsum_bytes) / params.prepare_bytes_cycle;
785 
786  // Requantize: need to consider each value in C (batch * multi * M * N)
787  uint64_t requantize_bytes = static_cast<uint64_t>(args._nbatches) * args._nmulti * args._Msize * args._Nsize;
788 
789  // Use "merge bytes per cycle" to store "requantize values per cycle".
790  float requantize_cycles = static_cast<float>(requantize_bytes) / params.merge_bytes_cycle;
791 
792  // Recalculate total_cycles with the extra components.
793  total_cycles = mac_cycles + rowsum_cycles + requantize_cycles;
794  }
795 
796  return total_cycles;
797  }
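// Illustrative note on the width penalty above (assumed figures): with out_width() = 8, a problem with
// _Nsize = 12 lies in the (8, 16) range and picks up the 1.15x multiplier, while _Nsize = 8 or 16 does
// not. For separate-quantize cases, prepare_bytes_cycle is reused as row-sum throughput and
// merge_bytes_cycle as requantize throughput, as the comments above note.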
798 
799  void set_quantized_bias(const int32_t *bias, size_t bias_multi_stride) override {
800  if (std::is_same<OutputStage, Requantize32>::value) {
801  Requantize32 *qp = reinterpret_cast<Requantize32 *>(&_os);
802 
803  qp->bias = bias;
804  qp->bias_multi_stride = bias_multi_stride;
805  }
806  }
807 
808  void set_indirect_parameters(size_t string_len, const To * const * const *ptr) override {
809  assert(string_len == _args._Ksize);
810  _indirect_buf = ptr;
811  }
812 
812 
813  void set_convolution_parameters(ConvolutionParameters parms) override {
814  assert(parms.input_channels == _args._Ksize);
815  _convolver = std::unique_ptr<convolver<To>>(new convolver<To>(parms));
816  }
817 
818  GemmConfig get_config() override {
819  GemmConfig c;
820 
821  c.method = GemmMethod::GEMM_HYBRID;
822  c.inner_block_size = _k_block;
823  c.outer_block_size = _n_block;
824  c.filter = get_type_name<strategy>();
825  c.weight_format = get_weight_format(kernel_weight_format<strategy, FixedFormat>::get(), sizeof(To));
826 
827  return c;
828  }
829 };
830 
831 template<typename strategy, typename To, typename Tr, typename OutputStage=Nothing>
832 using GemmHybridIndirectFixedFormat = GemmHybridIndirect<strategy, To, Tr, OutputStage, false, true>;
833 
834 } // namespace arm_gemm
835 
836 #ifdef __I_DEFINED_UNUSED
837 #undef UNUSED
838 #endif