template<typename To, typename Tr, typename Tgemm>
class QuantizeWrapper : public GemmCommon<To, Tr> {

    int32_t *_row_sums = nullptr;
    int32_t *_col_sums = nullptr;

    void *working_space = nullptr;
    bool  arrays_set    = false;
    size_t subgemm_output_size() const { /* ... */ }

    size_t col_sum_size() const { /* ... */ }

    size_t row_sum_size() const { /* ... */ }

    size_t local_working_size() const {
        return subgemm_output_size() + row_sum_size();
    }
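The bodies of the first three size helpers are not shown in this listing. A standalone sketch of how such sizes follow from the problem dimensions is below; the Dims struct and the exact formulas are illustrative assumptions, not the real GemmArgs fields.

#include <cstddef>
#include <cstdint>

// Illustrative dimension bundle; the wrapper itself reads these from GemmArgs.
struct Dims { std::size_t M, N, K, nbatches, nmulti; };

// int32 scratch for the whole un-requantized subgemm result (assumed M x N per batch, per multi).
std::size_t subgemm_output_size(const Dims &d) { return d.M * d.N * d.nbatches * d.nmulti * sizeof(int32_t); }
// One column sum per output column, per multi.
std::size_t col_sum_size(const Dims &d)        { return d.N * d.nmulti * sizeof(int32_t); }
// One row sum per output row, per batch, per multi.
std::size_t row_sum_size(const Dims &d)        { return d.M * d.nbatches * d.nmulti * sizeof(int32_t); }
// Matches the return statement shown above: subgemm output followed by the row sums.
std::size_t local_working_size(const Dims &d)  { return subgemm_output_size(d) + row_sum_size(d); }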
    void set_child_arrays() {
        if (working_space == nullptr || arrays_set == false)
            return;

        _subgemm->set_arrays(this->_Aptr, this->_lda, this->_A_batch_stride, this->_A_multi_stride,
                             this->_Bptr, this->_ldb, this->_B_multi_stride,
                             /* ... output and bias arguments elided in this listing ... */);
    }
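set_child_arrays only configures the inner GEMM once both the user arrays and the working space are known, whichever arrives last. A minimal sketch of that deferred-configuration pattern, with illustrative names (LazyChild is not part of the library):

// Whichever of the two setters is called last triggers the child configuration;
// until both pieces are present, configure_child() is a no-op.
struct LazyChild {
    bool  arrays_set    = false;
    void *working_space = nullptr;

    void on_arrays_set()               { arrays_set = true;     configure_child(); }
    void on_working_space(void *space) { working_space = space; configure_child(); }

    void configure_child() {
        if (working_space == nullptr || !arrays_set)
            return;  // wait until both the arrays and the scratch buffer are known
        // ... point the child GEMM at the user arrays and the scratch buffer ...
    }
};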
    void col_sums_pretransposed(const To *B, const int ldb, const int B_multi_stride) {
        for (unsigned int multi=0; multi<_args._nmulti; multi++) {
            /* ... */
        }
    }
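Per the compute_col_sums declaration listed further down, the loop above produces one set of column sums per multi. A scalar reference for plain column sums, assuming row-major B with leading dimension ldb; it deliberately omits the Requantize32 parameters handled by the library routine.

#include <cstdint>

// Reference column sums: for each output column n, add up the K values of B in
// that column.  This is only the plain summation, not the library's compute_col_sums.
template<typename To>
void reference_col_sums(const To *B, int ldb, unsigned int K, unsigned int N, int32_t *col_sums) {
    for (unsigned int n = 0; n < N; n++) {
        int32_t sum = 0;
        for (unsigned int k = 0; k < K; k++) {
            sum += static_cast<int32_t>(B[k * ldb + n]);
        }
        col_sums[n] = sum;
    }
}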
    void requantize_runtime(unsigned int threadid) {
        /* ... this thread's row range [first_row, last_row) is derived from threadid ... */
        for (unsigned int multi=0; multi<_args._nmulti; multi++) {
            for (unsigned int batch=0; batch<_args._nbatches; batch++) {
                compute_row_sums(_params, _args._Ksize, (last_row - first_row),
                                 this->_Aptr + (multi * this->_A_multi_stride) + (batch * this->_A_batch_stride) + (first_row * this->_lda),
                                 /* ... */);

                requantize_block_32(/* ... */,
                                    this->_Cptr + (multi * this->_C_multi_stride) + (batch * this->_C_batch_stride) + (first_row * this->_ldc), this->_ldc,
                                    /* ... */,
                                    _col_sums + (multi * _args._Nsize), 0);
            }
        }
    }
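The per-element arithmetic behind requantize_block_32 (declared further down) can be sketched in scalar form. This is a float illustration only; the sign conventions of the Requantize32 offsets and the fixed-point rounding in the real routine may differ.

#include <algorithm>
#include <cmath>
#include <cstdint>

// One output element: correct the raw int32 accumulator for the A and B zero
// points using the precomputed row/col sums, rescale, add the output offset
// and saturate to uint8.  Float rescaling is used here purely for clarity.
inline uint8_t requantize_one(int32_t acc, int32_t row_sum, int32_t col_sum,
                              int32_t K, int32_t a_offset, int32_t b_offset,
                              int32_t c_offset, float scale) {
    // sum_k (A - a_off)(B - b_off) = acc - a_off*col_sum - b_off*row_sum + K*a_off*b_off
    int32_t corrected = acc - (a_offset * col_sum) - (b_offset * row_sum) + (K * a_offset * b_offset);
    int32_t out = static_cast<int32_t>(std::lround(corrected * scale)) + c_offset;
    return static_cast<uint8_t>(std::min(255, std::max(0, out)));
}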
        _subgemm = gemm<To, Tgemm>(newargs);

        if (_subgemm == nullptr) {
            /* ... */
        }
    void set_arrays(const To *A, const int lda, const int A_batch_stride, const int A_multi_stride,
                    const To *B, const int ldb, const int B_multi_stride,
                          Tr *C, const int ldc, const int C_batch_stride, const int C_multi_stride,
                    const Tr *bias, const int bias_multi_stride) override {
        GemmCommon<To, Tr>::set_arrays(A, lda, A_batch_stride, A_multi_stride, B, ldb, B_multi_stride,
                                       C, ldc, C_batch_stride, C_multi_stride, bias, bias_multi_stride);
        /* ... */
    }
    ndrange_t get_window_size() const override { return { _subgemm->get_window_size() }; }

    void set_nthreads(int nthreads) override { _subgemm->set_nthreads(nthreads); /* ... */ }
    void execute(const ndcoord_t &work_range, const ndcoord_t &thread_locator, int threadid) override {
        _subgemm->execute(work_range, thread_locator, threadid);
        /* ... */
        requantize_runtime(threadid);
    }
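Between the two calls above, every thread's share of the int32 subgemm result must be complete before any thread requantizes its row slice, since requantize_runtime partitions rows by threadid while the subgemm may partition its work differently. The synchronization is not shown in this listing; a minimal sketch of the hand-off using C++20 std::barrier, purely for illustration:

#include <barrier>

// Each thread: run its part of the int32 subgemm, wait for everyone, then
// requantize its own rows.  std::barrier stands in for the library's own
// synchronization mechanism, which this listing does not show.
template<typename Subgemm, typename Requant>
void execute_thread(Subgemm &&run_subgemm, Requant &&requantize,
                    std::barrier<> &sync, int threadid) {
    run_subgemm(threadid);     // produce int32 accumulators
    sync.arrive_and_wait();    // all accumulators must exist before requantizing
    requantize(threadid);      // requantize this thread's slice of output rows
}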
    size_t get_working_size() const override { return _subgemm->get_working_size() + local_working_size(); }
    void set_working_space(void *space) override {
        uintptr_t space_int = reinterpret_cast<uintptr_t>(space);

        working_space = space;
        _subgemm->set_working_space(reinterpret_cast<void *>(space_int + local_working_size()));

        _row_sums = reinterpret_cast<int32_t *>(space_int + subgemm_output_size());
        /* ... */
    }
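The pointer arithmetic above implies a single flat allocation laid out as [ subgemm int32 output | row sums | subgemm's own working space ]. A standalone sketch of that carving, with illustrative names:

#include <cstddef>
#include <cstdint>

// View over one flat working-space allocation, matching the offsets used above.
struct WorkspaceView {
    void    *subgemm_output;   // starts at the base of the allocation
    int32_t *row_sums;         // follows the subgemm output
    void    *subgemm_working;  // everything after local_working_size() bytes
};

inline WorkspaceView carve_workspace(void *space, std::size_t subgemm_output_bytes, std::size_t row_sum_bytes) {
    std::uintptr_t base = reinterpret_cast<std::uintptr_t>(space);
    return WorkspaceView{
        space,
        reinterpret_cast<int32_t *>(base + subgemm_output_bytes),
        reinterpret_cast<void *>(base + subgemm_output_bytes + row_sum_bytes)
    };
}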
    bool B_is_pretransposed() const override { return _subgemm->B_is_pretransposed(); }

    bool B_pretranspose_required() const override { return _subgemm->B_pretranspose_required(); }

    size_t get_B_pretransposed_array_size() const override { return _subgemm->get_B_pretransposed_array_size() + col_sum_size(); }
    void requantize_bias(void *in_buffer, const To *B, const int ldb, const int B_multi_stride) override {
        _col_sums = reinterpret_cast<int32_t *>(in_buffer);

        col_sums_pretransposed(B, ldb, B_multi_stride);
    }
    void pretranspose_B_array(void *buffer, const To *B, const int ldb, const int B_multi_stride) override {
        /* ... */
        uintptr_t buffer_int = reinterpret_cast<uintptr_t>(buffer);
        _subgemm->pretranspose_B_array(reinterpret_cast<void *>(buffer_int + col_sum_size()), B, ldb, B_multi_stride);
    }
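Together with requantize_bias and get_B_pretransposed_array_size above, this gives a pretranspose buffer laid out as [ column sums | subgemm's pretransposed B ]. A caller-side sketch of the flow; the allocation details are assumptions and Gemm is any object exposing this interface:

#include <cstddef>
#include <cstdlib>

// Pretranspose B once up front: query the total size (column sums plus the
// subgemm's own pretransposed data), allocate, and let the wrapper fill it.
template<typename Gemm, typename To>
void *pretranspose_b(Gemm &gemm, const To *B, int ldb, int B_multi_stride) {
    std::size_t bytes = gemm.get_B_pretransposed_array_size();
    void *buffer = std::malloc(bytes);  // illustrative; real code may want aligned storage
    gemm.pretranspose_B_array(buffer, B, ldb, B_multi_stride);
    return buffer;  // caller owns it; it can later be handed back via set_pretransposed_B_data()
}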
    void set_pretransposed_B_data(void *buffer) override {
        uintptr_t buffer_int = reinterpret_cast<uintptr_t>(buffer);
        _subgemm->set_pretransposed_B_data(reinterpret_cast<void *>(buffer_int + col_sum_size()));
        _col_sums = reinterpret_cast<int32_t *>(buffer);
    }
    GemmConfig get_config() override {
        /* ... */
        std::string n = "quantize_wrapper[";
        /* ... */
    }
void compute_row_sums(const Requantize32 &qp, unsigned int width, unsigned int height, const T *input, unsigned int in_stride, int32_t *row_bias)
void set_working_space(void *space) override
void set_quantized_bias(const int32_t *bias, size_t bias_multi_stride) override
void set_nthreads(int nthreads) override
void set_pretransposed_B_data(void *buffer) override
QuantizeWrapper operator=(const QuantizeWrapper &)=delete
size_t get_working_size() const override
GemmConfig get_config() override
void requantize_bias(void *in_buffer, const To *B, const int ldb, const int B_multi_stride) override
ndrange_t get_window_size() const override
void pretranspose_B_array(void *buffer, const To *B, const int ldb, const int B_multi_stride) override
void set_nthreads(unsigned int nthreads)
QuantizeWrapper(const QuantizeWrapper &)=delete
void requantize_block_32(const Requantize32 &qp, unsigned int width, unsigned int height, const Tin *input, unsigned int in_stride, Tout *output, unsigned int out_stride, const int32_t *row_bias, const int32_t *col_bias, unsigned int start_col)
virtual void set_arrays(const To *A, const int lda, const int A_batch_stride, const int A_multi_stride, const To *B, const int ldb, const int B_multi_stride, Tr *C, const int ldc, const int C_batch_stride, const int C_multi_stride, const Tr *bias, const int bias_multi_stride)
using UniqueGemmCommon = std::unique_ptr<GemmCommon<Top, Tret>>
void set_arrays(const To *A, const int lda, const int A_batch_stride, const int A_multi_stride, const To *B, const int ldb, const int B_multi_stride, Tr *C, const int ldc, const int C_batch_stride, const int C_multi_stride, const Tr *bias, const int bias_multi_stride) override
size_t get_B_pretransposed_array_size() const override
NDCoordinate builds upon a range, but specifies a starting position in addition to a size which it inherits from NDRange.
void compute_col_sums(const Requantize32 &qp, unsigned int width, unsigned int height, const T *input, unsigned int in_stride, int32_t *col_bias, unsigned int depth, unsigned int multi, unsigned int first_col)
bool B_pretranspose_required() const override
bool B_is_pretransposed() const override
void execute(const ndcoord_t &work_range, const ndcoord_t &thread_locator, int threadid) override
Main execute member function.
QuantizeWrapper(const GemmArgs &args, const Requantize32 &qp)