quantize_wrapper.hpp (Compute Library 23.05)
/*
 * Copyright (c) 2019-2021 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#pragma once

#include "arm_gemm.hpp"

#include "barrier.hpp"
#include "gemm_implementation.hpp"
#include "quantized.hpp"

namespace arm_gemm {

/* Quantized wrapper - run an integer GEMM and wrap the requantization step around it. */

template<typename To, typename Tr, typename Tgemm>
class QuantizeWrapper : public GemmCommon<To, Tr> {
private:
    UniqueGemmCommon<To, Tgemm> _subgemm = nullptr;
    int32_t      *_row_sums = nullptr;
    int32_t      *_col_sums = nullptr;
    Requantize32  _params;
    GemmArgs      _args;
    barrier       _barrier;

    void *working_space = nullptr;
    bool  arrays_set    = false;

    /* We need a subgemm which outputs the 32-bit intermediates - how much space is needed for that? */
    size_t subgemm_output_size() const {
        return (_args._Msize * _args._Nsize * _args._nbatches * _args._nmulti * sizeof(int32_t));
    }

    size_t col_sum_size() const {
        return (_args._Nsize * _args._nmulti * sizeof(int32_t));
    }

    size_t row_sum_size() const {
        return (_args._Msize * _args._nbatches * _args._nmulti * sizeof(int32_t));
    }

    /* Local working space: we need space for the subgemm output (above) and
     * the row sums. */
    size_t local_working_size() const {
        return subgemm_output_size() + row_sum_size();
    }

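    /* Illustrative sizing (not part of the original source): for a hypothetical
     * problem with _Msize=128, _Nsize=64, _nbatches=2, _nmulti=1:
     *   subgemm_output_size() = 128 * 64 * 2 * 1 * 4 bytes = 65536 bytes
     *   row_sum_size()        = 128 * 2 * 1 * 4 bytes      = 1024 bytes
     *   col_sum_size()        = 64 * 1 * 4 bytes           = 256 bytes
     * so local_working_size() = 65536 + 1024 = 66560 bytes.  The column sums are
     * not part of this working space; they live in the pretransposed B buffer
     * (see get_B_pretransposed_array_size() below). */
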
    void set_child_arrays() {
        if (working_space == nullptr || arrays_set == false)
            return;

        /* Use the first part of our working space for the subgemm result, pass the operand details straight through. */
        _subgemm->set_arrays(this->_Aptr, this->_lda, this->_A_batch_stride, this->_A_multi_stride,
                             this->_Bptr, this->_ldb, this->_B_multi_stride,
                             reinterpret_cast<Tgemm *>(working_space), _args._Nsize, (_args._Nsize * _args._Msize), (_args._Nsize * _args._Msize * _args._nbatches),
                             nullptr, 0);
    }

    void col_sums_pretransposed(const To *B, const int ldb, const int B_multi_stride) {
        for (unsigned int multi=0; multi<_args._nmulti; multi++) {
            compute_col_sums(_params, _args._Nsize, _args._Ksize, B + (multi * B_multi_stride), ldb, _col_sums + (multi * _args._Nsize), _args._Ksize, multi, 0);
        }
    }
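
    /* Note (added for clarity): compute_col_sums() produces one 32-bit sum of B per
     * output column, per 'multi' index.  Together with the per-row sums of A computed
     * at run time, these feed the zero-point (offset) corrections applied by
     * requantize_block_32() when the 32-bit accumulators are turned into the final
     * quantized output. */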

    void requantize_runtime(unsigned int threadid) {
        unsigned int first_row = (threadid * _args._Msize) / _args._maxthreads;
        unsigned int last_row  = ((threadid+1) * _args._Msize) / _args._maxthreads;

        for (unsigned int multi=0; multi<_args._nmulti; multi++) {
            for (unsigned int batch=0; batch<_args._nbatches; batch++) {
                /* Compute row sums now */
                compute_row_sums(_params, _args._Ksize, (last_row - first_row), this->_Aptr + (multi * this->_A_multi_stride) + (batch * this->_A_batch_stride) + (first_row * this->_lda),
                                 this->_lda, _row_sums + (multi * _args._nbatches * _args._Msize) + (batch * _args._Msize) + first_row);
                // If we don't care about negative values, call the version of this function that doesn't correct before shifting.
                // 'c_offset' represents zero, so if the lowest possible quantized output value is the same or more than that we will not output negative numbers.
                requantize_block_32(_params, _args._Nsize, (last_row - first_row),
                                    reinterpret_cast<Tgemm *>(working_space) + (multi * (_args._Msize * _args._Nsize * _args._nbatches)) + (batch * (_args._Msize * _args._Nsize)) + (first_row * _args._Nsize),
                                    _args._Nsize,
                                    this->_Cptr + (multi * this->_C_multi_stride) + (batch * this->_C_batch_stride) + (first_row * this->_ldc), this->_ldc,
                                    _row_sums + (multi * _args._nbatches * _args._Msize) + (batch * _args._Msize) + first_row,
                                    _col_sums + (multi * _args._Nsize), 0);
            }
        }
    }

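    /* Illustrative note (not in the original source): rows are split evenly across
     * threads by the integer arithmetic above.  With, say, _Msize = 100 and
     * _maxthreads = 8, thread 0 handles rows [0, 12), thread 1 rows [12, 25),
     * thread 2 rows [25, 37), ... and thread 7 rows [87, 100), so every row is
     * requantized exactly once with no overlap between threads. */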

public:
    QuantizeWrapper(const QuantizeWrapper &) = delete;
    QuantizeWrapper operator=(const QuantizeWrapper &) = delete;

    QuantizeWrapper(const GemmArgs &args, const Requantize32 &qp) : _params(qp), _args(args), _barrier(args._maxthreads) {
        GemmArgs newargs = GemmArgs(args._ci, args._Msize, args._Nsize, args._Ksize, args._Ksections, args._nbatches, args._nmulti, args._indirect_input, Activation(), args._maxthreads);
        _subgemm = gemm<To, Tgemm>(newargs);

        if (_subgemm == nullptr) {
            return;
        }
    }
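
    /* Construction sketch (illustrative only - in practice this wrapper is normally
     * selected via the gemm<>() factory rather than constructed directly).  The
     * argument values below are hypothetical and mirror the GemmArgs constructor
     * call used just above:
     *
     *   GemmArgs     args(&ci, M, N, K, 1, nbatches, nmulti, false, Activation(), maxthreads);
     *   Requantize32 qp = ...;   // zero points, multipliers, shifts, bias as required
     *   QuantizeWrapper<uint8_t, uint8_t, uint32_t> gemm_q(args, qp);
     */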

    void set_arrays(const To *A, const int lda, const int A_batch_stride, const int A_multi_stride,
                    const To *B, const int ldb, const int B_multi_stride,
                    Tr *C, const int ldc, const int C_batch_stride, const int C_multi_stride,
                    const Tr *bias, const int bias_multi_stride) override {
        GemmCommon<To, Tr>::set_arrays(A, lda, A_batch_stride, A_multi_stride, B, ldb, B_multi_stride, C, ldc, C_batch_stride, C_multi_stride, bias, bias_multi_stride);

        arrays_set = true;
        set_child_arrays();
    }

    ndrange_t get_window_size() const override {
        return { _subgemm->get_window_size() };
    }

    void set_nthreads(int nthreads) override {
        _subgemm->set_nthreads(nthreads);
        _barrier.set_nthreads(nthreads);
        _args._maxthreads = nthreads;
    }

    void execute(const ndcoord_t &work_range, const ndcoord_t &thread_locator, int threadid) override {
        _subgemm->execute(work_range, thread_locator, threadid);

        _barrier.arrive_and_wait();

        requantize_runtime(threadid);
    }
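
    /* Flow note (added for clarity): each worker thread first runs its share of the
     * integer subgemm, which writes 32-bit accumulators into the local working space.
     * The barrier then guarantees the whole int32 result is complete before any
     * thread starts requantize_runtime(), because the requantization row split
     * differs from the subgemm's own work partition and may read rows produced by
     * other threads. */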

    size_t get_working_size() const override {
        return _subgemm->get_working_size() + local_working_size();
    }

    // Space arrangement:

    // ptr
    // V
    // | subgemm output | row_sums | subgemm working space |
    void set_working_space(void *space) override {
        uintptr_t space_int = reinterpret_cast<uintptr_t>(space);

        working_space = space;
        _subgemm->set_working_space(reinterpret_cast<void *>(space_int + local_working_size()));

        _row_sums = reinterpret_cast<int32_t *>(space_int + subgemm_output_size());

        set_child_arrays();
    }
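
    /* Illustrative layout (hypothetical numbers, matching the sizing example above):
     * with subgemm_output_size() == 65536 and row_sum_size() == 1024, the supplied
     * buffer is carved up as:
     *   [ptr + 0,     ptr + 65536)  int32 subgemm output
     *   [ptr + 65536, ptr + 66560)  row sums
     *   [ptr + 66560, ...)          handed to the subgemm as its own working space */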

    bool B_is_pretransposed() const override {
        /* We clear this flag if the subgemm isn't pretransposed, so just return its value */
        return _subgemm->B_is_pretransposed();
    }

    bool B_pretranspose_required() const override {
        return _subgemm->B_pretranspose_required();
    }

    size_t get_B_pretransposed_array_size() const override {
        return _subgemm->get_B_pretransposed_array_size() + col_sum_size();
    }

    void requantize_bias(void *in_buffer, const To *B, const int ldb, const int B_multi_stride) override {
        _col_sums = reinterpret_cast<int32_t *>(in_buffer);
        col_sums_pretransposed(B, ldb, B_multi_stride);
    }

    void pretranspose_B_array(void *buffer, const To *B, const int ldb, const int B_multi_stride) override {
        uintptr_t buffer_int = reinterpret_cast<uintptr_t>(buffer);
        _subgemm->pretranspose_B_array(reinterpret_cast<void *>(buffer_int + col_sum_size()), B, ldb, B_multi_stride);

        requantize_bias(buffer, B, ldb, B_multi_stride);
    }
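
    /* Buffer note (added for clarity): the pretransposed B buffer is shared - the
     * first col_sum_size() bytes hold the per-column sums of B computed here, and
     * the remainder holds whatever pretransposed layout the subgemm wants.  That is
     * why a col_sum_size() offset is applied before forwarding the buffer to the
     * subgemm, both above and in set_pretransposed_B_data() below. */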

    void set_pretransposed_B_data(void *buffer) override {
        uintptr_t buffer_int = reinterpret_cast<uintptr_t>(buffer);
        _subgemm->set_pretransposed_B_data(reinterpret_cast<void *>(buffer_int + col_sum_size()));
        _col_sums = reinterpret_cast<int32_t *>(buffer);
    }

    void set_quantized_bias(const int32_t *bias, size_t bias_multi_stride) override {
        _params.bias              = bias;
        _params.bias_multi_stride = bias_multi_stride;
    }

    GemmConfig get_config() override {
        GemmConfig c = _subgemm->get_config();

        std::string n = "quantize_wrapper[";
        n.append(c.filter);
        n.append("]");

        c.filter = n;

        return c;
    }
};

} // namespace arm_gemm
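
/* End-to-end sketch (illustrative only, not part of this header): roughly how a
 * caller drives a GemmCommon-style quantized gemm once one has been obtained from
 * the gemm<>() factory.  The buffers A, B, C, the leading dimensions and the
 * Requantize32 parameters below are hypothetical placeholders.
 *
 *   GemmArgs     args(&ci, M, N, K, 1, 1, 1, false, Activation(), maxthreads);
 *   Requantize32 qp = ...;   // zero points, requantization multipliers/shifts, clamp range
 *   auto gemm_q = gemm<uint8_t, uint8_t>(args, qp);
 *
 *   std::vector<uint8_t> workspace(gemm_q->get_working_size());
 *   gemm_q->set_working_space(workspace.data());
 *   gemm_q->set_arrays(A, K, 0, 0, B, N, 0, C, N, 0, 0, nullptr, 0);
 *
 *   if (gemm_q->B_pretranspose_required()) {
 *       std::vector<uint8_t> pretransposed(gemm_q->get_B_pretransposed_array_size());
 *       gemm_q->pretranspose_B_array(pretransposed.data(), B, N, 0);
 *       // 'pretransposed' must stay alive for as long as the gemm object is used
 *   }
 *
 *   // Finally, execute() is called once per thread over subdivisions of
 *   // get_window_size().  With QuantizeWrapper every participating thread must
 *   // call execute(), since the internal barrier waits for _maxthreads arrivals.
 */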