Compute Library 22.08
gemm_hybrid_quantized_inline.hpp
/*
 * Copyright (c) 2017-2019,2021 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#pragma once

#include <assert.h>

#include <algorithm>

#include "arm_gemm.hpp"
#include "ndrange.hpp"
#include "utils.hpp"

#include "mergeresults.hpp"
#include "transform.hpp"

#ifdef CYCLE_PROFILING
#include "profiler.hpp"
#endif

namespace arm_gemm {

// Implementation of the GemmCommon abstract class: a "hybrid" GEMM over
// quantized operands in which the kernel requantizes its results inline,
// rather than in a separate merge pass over a 32-bit intermediate array.
template<typename strategy, typename To, typename Tr>
class GemmHybridQuantizedInline : public GemmCommon<To, Tr> {
    typedef typename strategy::operand_type Toi;
    typedef typename strategy::result_type Tri;

    /* const properties set by constructor */
    const CPUInfo * const _ci;

    const unsigned int _Msize;
    const unsigned int _Nsize;
    const unsigned int _Ksize;

    const unsigned int _nbatches;
    const unsigned int _nmulti;

    /* Blocking info */
    const unsigned int _k_block;
    const unsigned int _n_block;
    const unsigned int _Mround;

    /* Pretransposed buffer. */
    const Toi *_B_transposed=nullptr;

    const NDRange<4> _window_range;

    Requantize32 _qp;
    int32_t *col_bias = nullptr;

    void *working_space = nullptr;

    unsigned int _nthreads;

    unsigned int get_col_sum_size() const {
        return _Nsize * _nmulti * sizeof(int32_t);
    }

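    // get_col_sum_size() is the number of bytes reserved at the start of the
    // pretransposed buffer for per-column sums of B: one int32_t per output
    // column, per multi. These are filled in by requantize_bias() below and
    // consumed by the kernel's inline requantization.
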
    static unsigned int compute_k_block(const GemmArgs &args) {
        // We don't support K blocks as we only temporarily store 32 bit results.
        return args._Ksize;

        // NOTE: everything below is unreachable because of the early return
        // above; the cache-blocking heuristic is kept for reference.
        if (args._cfg && args._cfg->inner_block_size) {
            return args._cfg->inner_block_size;
        }

        const unsigned int L1_size = args._ci->get_L1_cache_size();

        // k_block: Find out how much of the larger array can be loaded into half the cache.
        // This should account for associative caches.
        unsigned int k_block = (L1_size / 2) / (sizeof(Toi) * (std::max(strategy::out_width(), strategy::out_height())));

        // Needs to be (at least a single) multiple of the K unroll level.
        k_block /= strategy::k_unroll();
        k_block = std::max(k_block, 1U) * strategy::k_unroll();

        // Now tune to presented problem size; this is how many blocks we need.
        unsigned int numk_blocks = iceildiv(args._Ksize, k_block);

        // So divide the space equally into that many blocks.
        k_block = iceildiv(args._Ksize, numk_blocks);

        // And round UP to the K unroll level required.
        k_block = roundup(k_block, strategy::k_unroll());

        return k_block;
    }

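    // Worked example of the (disabled) heuristic above, with hypothetical
    // numbers: L1 = 32 KiB, sizeof(Toi) = 1, out_width() = 16, out_height() = 8,
    // k_unroll() = 4, Ksize = 1000.
    //   k_block = (32768 / 2) / (1 * 16) = 1024 (already a multiple of 4)
    //   numk_blocks = iceildiv(1000, 1024) = 1
    //   k_block = iceildiv(1000, 1) = 1000; roundup(1000, 4) = 1000
    // In practice compute_k_block() returns Ksize directly, so K is never split.
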
    static unsigned int compute_n_block(const GemmArgs &args) {
        if (args._cfg && args._cfg->outer_block_size) {
            return args._cfg->outer_block_size;
        }

        const unsigned int k_block = compute_k_block(args);
        const unsigned int L2_size = args._ci->get_L2_cache_size();

        // n_block: Work out how many rows (of length k_block) will fit in the L2.
        // Don't allocate more than 90% of the L2 to allow for overheads, and subtract off the L1 contents.
        unsigned int n_block = (((L2_size * 9) / 10) - (k_block * sizeof(Toi) * (strategy::out_width() + strategy::out_height()))) /
                               (sizeof(Toi) * k_block);

        // Needs to be (at least a single) multiple of the kernel output width.
        n_block /= strategy::out_width();
        n_block = std::max(n_block, 1U) * strategy::out_width();

        // And tune to the presented problem size.
        unsigned int numblocks = iceildiv(args._Nsize, n_block);
        n_block = iceildiv(args._Nsize, numblocks);
        n_block = roundup(n_block, strategy::out_width());

        return n_block;
    }

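    // Worked example with hypothetical numbers: L2 = 512 KiB, sizeof(Toi) = 1,
    // out_width() = 16, out_height() = 8, k_block = 1000, Nsize = 500.
    //   budget  = (524288 * 9) / 10 - 1000 * 1 * (16 + 8) = 471859 - 24000 = 447859
    //   n_block = 447859 / (1 * 1000) = 447 -> floor to multiple of 16 -> 432
    //   numblocks = iceildiv(500, 432) = 2; n_block = iceildiv(500, 2) = 250;
    //   roundup(250, 16) = 256.
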
public:
    GemmHybridQuantizedInline(GemmHybridQuantizedInline &) = delete;
    GemmHybridQuantizedInline & operator=(GemmHybridQuantizedInline &) = delete;

    /* Constructor */
    GemmHybridQuantizedInline(const GemmArgs &args, const Requantize32 &qp)
              : _ci(args._ci), _Msize(args._Msize), _Nsize(args._Nsize), _Ksize(args._Ksize),
                _nbatches(args._nbatches), _nmulti(args._nmulti),
                _k_block(compute_k_block(args)), _n_block(compute_n_block(args)),
                _Mround(roundup(args._Msize, strategy::out_height())),
                _window_range(iceildiv(args._Msize, strategy::out_height()), _nbatches, iceildiv(_Nsize, _n_block), _nmulti),
                _qp (qp), _nthreads(args._maxthreads) { }

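    // The 4D work window set up above is (M blocks of out_height() rows) x
    // (batches) x (N blocks of n_block columns) x (multis); each work item
    // covers the full K extent, so no synchronization on the output is needed.
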
    // Interface implementation - Compulsory functions
    ndrange_t get_window_size() const override {
        return { _window_range.total_size() };
    }

    // This kernel can always be dynamically scheduled.
    bool supports_dynamic_scheduling() const override {
        return true;
    }

    // Execute
    void execute(const ndcoord_t &work_range, const ndcoord_t &, int) override {
#ifdef CYCLE_PROFILING
        profiler prof;
#endif
        strategy strat(_ci);

        /* Make sure we've been set up correctly. */
        assert(_B_transposed);
        static_assert(std::is_same<To, Toi>::value, "gemm_hybrid_quantized_inline: Operand types must be the same.");

        /* For now, each work item implies all the K for a given output
         * pixel (so we don't need to synchronize access to the output
         * array). So separate the loop over K blocks here. */
        for (unsigned int k0=0; k0<_Ksize; k0+=_k_block) {
            unsigned int kmax   = std::min(k0 + _k_block, _Ksize);
            unsigned int kern_k = roundup(kmax-k0, strategy::k_unroll());

            auto p = _window_range.iterator(work_range.get_position(0), work_range.get_position_end(0));

            if (p.done()) {
                return;
            }

            do {
                const unsigned int m_start = p.dim(0) * strategy::out_height();
                const unsigned int m_end   = std::min(p.dim0_max() * strategy::out_height(), _Msize);
                const unsigned int batch   = p.dim(1);
                const unsigned int n0      = p.dim(2) * _n_block;
                const unsigned int nmax    = std::min(n0 + _n_block, _Nsize);
                const unsigned int multi   = p.dim(3);

                const Toi *b_panel = _B_transposed +
                                     (multi * roundup(_Nsize, strategy::out_width()) * roundup(_Ksize, strategy::k_unroll())) +
                                     (k0 * roundup(_Nsize, strategy::out_width())) +
                                     (n0 * kern_k);

                {
#ifdef CYCLE_PROFILING
                    auto p = prof.ScopedProfiler(PROFILE_KERNEL, (m_end - m_start) * kern_k * roundup(nmax-n0, strategy::out_width()));
#endif
                    strat.kernel(this->_Aptr + (multi * this->_A_multi_stride) + (batch * this->_A_batch_stride) + (m_start * this->_lda) + k0, this->_lda,
                                 b_panel,
                                 this->_Cptr + (multi * this->_C_multi_stride) + (batch * this->_C_batch_stride) + (m_start * this->_ldc) + n0, this->_ldc,
                                 (m_end - m_start), (nmax - n0), kmax - k0,
                                 col_bias + (multi * _Nsize) + n0, _qp);
                }
            } while (p.next_dim1());
        }
    }

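    // Note that strat.kernel() is handed the per-column sums (col_bias) and the
    // requantization parameters (_qp) directly, so the quantized output is
    // produced "inline" by the kernel itself; there is no separate merge pass
    // over a 32-bit intermediate array.
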
    // Interface implementation - pretransposed
    bool B_is_pretransposed() const override {
        return true;
    }

    bool B_pretranspose_required() const override {
        return (_B_transposed==nullptr);
    }

    size_t get_B_pretransposed_array_size() const override {
        return get_col_sum_size() + (roundup(_Nsize, strategy::out_width()) * roundup(_Ksize, strategy::k_unroll()) * _nmulti * sizeof(Toi));
    }

    void requantize_bias(void *in_buffer, const To *B, const int ldb, const int B_multi_stride) override {
        col_bias = reinterpret_cast<int32_t *>(in_buffer);

        for (unsigned int i=0; i<_nmulti; i++) {
            compute_col_sums(_qp, _Nsize, _Ksize, B + (i * B_multi_stride), ldb, col_bias + (i * _Nsize), _Ksize, i, 0);
        }
    }

    void pretranspose_B_array(void *in_buffer, const To *B, const int ldb, const int B_multi_stride) override {
        requantize_bias(in_buffer, B, ldb, B_multi_stride);

        uintptr_t buffer_int = reinterpret_cast<uintptr_t>(in_buffer);
        Toi *buffer = reinterpret_cast<Toi *>(buffer_int + get_col_sum_size());
        _B_transposed = buffer;
        strategy strat(_ci);

        for (unsigned int multi=0; multi<_nmulti; multi++) {
            for (unsigned int k0=0; k0<_Ksize; k0+=_k_block) {
                const unsigned int kmax   = std::min(k0 + _k_block, _Ksize);
                const unsigned int k_size = roundup(kmax-k0, strategy::k_unroll());

                for (unsigned int x0=0; x0<_Nsize; x0+=_n_block) {
                    const unsigned int xmax = std::min(x0+_n_block, _Nsize);

                    const unsigned int size = roundup(xmax-x0, strategy::out_width()) * k_size;

                    strat.transforms.PrepareB(buffer, B + (multi * B_multi_stride), ldb,
                                              x0, xmax, k0, kmax);

                    buffer += size;
                }
            }
        }
    }

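    // Layout of the pretransposed buffer, as established above (column sums
    // first, then the padded, transposed B panels in multi/K-block/N-block
    // order):
    //
    //   [ col sums: _Nsize * _nmulti * int32_t ]
    //   [ multi 0: for each K block, for each N block, a panel of
    //     roundup(n, out_width()) * roundup(k, k_unroll()) Toi elements ]
    //   [ multi 1: ... ]
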
    void set_pretransposed_B_data(void *in_buffer) override {
        uintptr_t buffer_int = reinterpret_cast<uintptr_t>(in_buffer);
        _B_transposed = reinterpret_cast<Toi *>(buffer_int + get_col_sum_size());
        col_bias = reinterpret_cast<int32_t *>(in_buffer);
    }

    void set_quantized_bias(const int32_t *bias, size_t bias_multi_stride) override {
        _qp.bias = bias;
        _qp.bias_multi_stride = bias_multi_stride;
    }
};

} // namespace arm_gemm
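
For orientation, here is a minimal sketch of the call sequence a caller would follow, using only the member functions defined above. It is an outline, not working code: the object creation, buffer names, and thread/window plumbing are illustrative (objects of this type are normally obtained through the arm_gemm::gemm<>() factory declared in arm_gemm.hpp rather than constructed directly).

// Sketch only - everything other than the member functions above is illustrative.
// auto gemm = arm_gemm::gemm<int8_t, int8_t, Requantize32>(args, qp);
//
// std::vector<uint8_t> b_buf(gemm->get_B_pretransposed_array_size());
// gemm->pretranspose_B_array(b_buf.data(), B, ldb, B_multi_stride);  // also fills the column sums
// gemm->set_quantized_bias(bias, bias_multi_stride);
//
// ...set the A and C pointers via the GemmCommon array interface, then split
// [0, get_window_size()) across threads and call execute() on each sub-range.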