Compute Library 23.08
gemm_common.hpp
1 /*
2  * Copyright (c) 2017-2021,2023 Arm Limited.
3  *
4  * SPDX-License-Identifier: MIT
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a copy
7  * of this software and associated documentation files (the "Software"), to
8  * deal in the Software without restriction, including without limitation the
9  * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10  * sell copies of the Software, and to permit persons to whom the Software is
11  * furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in all
14  * copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22  * SOFTWARE.
23  */
24 #pragma once
25 
26 #include "convolution_parameters.hpp"
27 #include "ndrange.hpp"
28 
29 #include <cstddef>
30 
31 namespace arm_gemm
32 {
33 // Avoid circular dependency with arm_gemm.hpp
34 struct GemmConfig;
35 
36 // Abstract class for the GEMM/GEMV functions.
37 //
38 // GEMM implementations may be "native" (never require any input
39 // permutation), "pretransposed" (require permutation up-front) or require
40 // working space (permute as they go along). This interface should support
41 // all of them.
42 
43 // The real GemmCommon class is templated based on the operand and return
44 // type. This is an interface class which is independent of those types.
45 class IGemmCommon
46 {
47 public:
48  /* Pass in the pointers to the arrays to be operated on and their
49  * strides. This "generic" version uses void *s, the preferred version
50  * is the one provided by templated GemmCommon (below) which takes
51  * appropriately typed pointers. If B is pretransposed (see below) then
52  * the settings for B here are ignored.
53  */
54  virtual void set_arrays_generic(const void *A, const int lda, const int A_batch_stride, const int A_multi_stride,
55  const void *B, const int ldb, /* batches share B */ const int B_multi_stride,
56  void *C, const int ldc, const int C_batch_stride, const int C_multi_stride,
57  const void *bias, /* no row or batch stride needed */ const int bias_multi_stride) = 0;
58 
59  /** @returns an ndrange containing ranges of the compute space which can be
60  * broken up and parallelised over
61  */
62  virtual ndrange_t get_window_size() const = 0;
63 
64  /* The maximum thread count is specified when the GEMM is created. Some
65  * implementations need to know how many threads will actually run in
66  * order to work properly.
67  *
68  * In some cases, after creating the GEMM the number of threads needs to
69  * be reduced (e.g. not enough work to split across threads). This
70  * method allows the number of actual threads to be run to be set (must
71  * be equal or lower).
72  *
73  * This has an empty default implementation, as GEMMs which don't care
74  * about thread count can safely ignore this.
75  */
76  virtual void set_nthreads(int) {};
77 
78  /* Whether this GEMM can be dynamically scheduled or not. */
79  virtual bool supports_dynamic_scheduling() const
80  {
81  return false;
82  }
83 
84  /** Main execute member function
85  * @param [in] work_range specifies the range of work we want to be computed, total range defined by get_window_size()
86  * @param [in] thread_locator where are we inside of the thread space
87  * @param [in] threadid a unique threadid
88  */
89  virtual void execute(const ndcoord_t &work_range, const ndcoord_t &thread_locator, int threadid) = 0;
90 
91  /*** Working space interface (optional) ***/
92  /* Total number of bytes of temporary working space needed. If zero, it's not necessary to call set_working_space(). */
93  virtual size_t get_working_size() const
94  {
95  return 0;
96  }
97  /* Provide working space buffer - the void * passed in must remain allocated for the duration of any execute calls. */
98  virtual void set_working_space(void *) {};
99 
100  /*** "Pretransposed" interface (optional) ***/
101  /* Is this object set up for pretranspose? If so, pretranspose_array() needs to be called before execute(); */
102  virtual bool B_is_pretransposed() const
103  {
104  return false;
105  }
106  /* Does pretranspose still need to be done? */
107  virtual bool B_pretranspose_required() const
108  {
109  return false;
110  }
111  /* Total number of bytes of space needed for pretransposed arrays. */
112  virtual size_t get_B_pretransposed_array_size() const
113  {
114  return 0;
115  }
116  /* Amount of work for the threaded cases */
117  virtual size_t get_B_pretranspose_window_size() const
118  {
119  return 1;
120  }
121  /* Perform pretranspose - arguments are output, input, input row stride and input multi stride. */
122  /* The "real" version of this depends on the templated operand type (see below). */
123  virtual void pretranspose_B_array_generic(void *, const void *, const int, const int) = 0;
124  /* Threaded version with window start/end parameters */
125  virtual void pretranspose_B_array_part_generic(void *, const void *, const int, const int, const size_t, const size_t) = 0;
126 
127  /* Set pretransposed data - the void * passed in must previously have been passed to pretranspose_B_array() for the same or a similar GEMM. */
128  virtual void set_pretransposed_B_data(void *)
129  {
130  }
131 
132  /*** "Quantized bias" interface (optional) ***/
133  /* Set the bias vector for quantized GEMMs */
134  virtual void set_quantized_bias(const int32_t *, size_t)
135  {
136  }
137 
138  /*** Indirect interface (optional) ***/
139  /* Set the indirect table. This comprises a number of values per kernel point, and a densely packed array of pointers,
140  * multis * batches * kernel_points */
141  virtual void set_indirect_parameters_generic(size_t, const void *const *const *)
142  {
143  }
144 
145  /*** Convolution interface (optional) ***/
146  /* Set the convolution parameters. */
147  virtual void set_convolution_parameters(ConvolutionParameters)
148  {
149  }
150 
151  /*** Introspection interface ***/
152  /* Get the configuration of this GEMM */
153  virtual GemmConfig get_config() = 0;
154 
155  // Destructor
156  virtual ~IGemmCommon()
157  {
158  }
159 };
160 
161 /* "Real" GemmCommon class which is templated on the operand and return types.
162  *
163  * In addition to correctly typed versions of the functions that operate on
164  * operand and return data, this class provides a default implementation of
165  * 'set_arrays' to capture the provided arguments in protected class
166  * members, as essentially any implementation will need these.
167  */
168 template <typename To, typename Tr>
169 class GemmCommon : public IGemmCommon
170 {
171 protected:
172  const To *_Aptr = nullptr;
173  int _lda = 0;
174  int _A_batch_stride = 0;
175  int _A_multi_stride = 0;
176  const To *_Bptr = nullptr;
177  int _ldb = 0;
178  int _B_multi_stride = 0;
179  Tr *_Cptr = nullptr;
180  int _ldc = 0;
181  int _C_batch_stride = 0;
182  int _C_multi_stride = 0;
183  const Tr *_bias = nullptr;
184  int _bias_multi_stride = 0;
185 
186 public:
187  /* Pass in the pointers to the arrays to be operated on and their
188  * strides (templated version with appropriate types). */
189  virtual void set_arrays(const To *A, const int lda, const int A_batch_stride, const int A_multi_stride,
190  const To *B, const int ldb, /* batches share B */ const int B_multi_stride,
191  Tr *C, const int ldc, const int C_batch_stride, const int C_multi_stride,
192  const Tr *bias, /* no row or batch stride needed */ const int bias_multi_stride)
193  {
194  _Aptr = A;
195  _lda = lda;
196  _A_batch_stride = A_batch_stride;
197  _A_multi_stride = A_multi_stride;
198  _Bptr = B;
199  _ldb = ldb;
200  _B_multi_stride = B_multi_stride;
201  _Cptr = C;
202  _ldc = ldc;
203  _C_batch_stride = C_batch_stride;
204  _C_multi_stride = C_multi_stride;
205  _bias = bias;
206  _bias_multi_stride = bias_multi_stride;
207  }
208 
209  /* Implementation of the void * overload which casts its arguments to the appropriate type. */
210  void set_arrays_generic(const void *A, const int lda, const int A_batch_stride, const int A_multi_stride,
211  const void *B, const int ldb, /* batches share B */ const int B_multi_stride,
212  void *C, const int ldc, const int C_batch_stride, const int C_multi_stride,
213  const void *bias, /* no row or batch stride needed */ const int bias_multi_stride) override
214  {
215  set_arrays(static_cast<const To *>(A), lda, A_batch_stride, A_multi_stride,
216  static_cast<const To *>(B), ldb, B_multi_stride,
217  static_cast<Tr *>(C), ldc, C_batch_stride, C_multi_stride,
218  static_cast<const Tr *>(bias), bias_multi_stride);
219  }
220 
221  /*** "Pretransposed" interface ***/
222 
223  /* Compute col sums over all columns */
224  virtual void requantize_bias(void *, const To *, const int, const int) {};
225 
226  /* Perform pretranspose - the void * passed in must remain allocated for the duration of any execute calls. */
227  /* Arguments are: output buffer pointer, source pointer, source row stride, source multi stride */
228  virtual void pretranspose_B_array(void *, const To *, const int, const int) {};
229 
230  /* Implementation of the void * overload which casts its arguments to the appropriate type. */
231  void pretranspose_B_array_generic(void *out, const void *in, const int row_stride, const int multi_stride) override
232  {
233  pretranspose_B_array(out, static_cast<const To *>(in), row_stride, multi_stride);
234  }
235 
236  /* Threaded versions of the above.
237  * The fallback/backwards compatible version of the threaded interface exposes a window size of 1 and
238  * just calls the non-threaded functions to do the work. This is valid as with window size of 1 the only
239  * legal values for start and end are 0 and 1 respectively. */
240  virtual void pretranspose_B_array_part(void *out, const To *in, const int row_stride, const int multi_stride, size_t, size_t)
241  {
242  pretranspose_B_array(out, in, row_stride, multi_stride);
243  };
244 
245  void pretranspose_B_array_part_generic(void *out, const void *in, const int row_stride, const int multi_stride, size_t start, size_t end) override
246  {
247  pretranspose_B_array_part(out, static_cast<const To *>(in), row_stride, multi_stride, start, end);
248  }
249 
250  /*** Indirect interface ***/
251  virtual void set_indirect_parameters(size_t, const To *const *const *)
252  {
253  }
254 
255  void set_indirect_parameters_generic(size_t sz, const void *const *const *ptr) override
256  {
257  set_indirect_parameters(sz, reinterpret_cast<const To *const *const *>(ptr));
258  }
259 };
260 
261 } // namespace arm_gemm
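
A minimal usage sketch follows (not part of the header). It drives a concrete GemmCommon<float, float> implementation, obtained elsewhere from the library, through the interface declared above for a single batch, a single multi and one thread. Only member functions declared in this file are used; the ndcoord_t/ndrange_t construction (the {start, size} initializer-list constructor and get_size()) is assumed from ndrange.hpp.

#include <cstdint>
#include <vector>

#include "gemm_common.hpp"

// Hypothetical helper: runs 'gemm' over its full compute window on one thread.
// Batch/multi strides are zero because only a single batch and multi is used.
void run_single_threaded(arm_gemm::GemmCommon<float, float> &gemm,
                         const float *A, int lda,
                         const float *B, int ldb,
                         float *C, int ldc)
{
    gemm.set_arrays(A, lda, 0, 0,
                    B, ldb, 0,
                    C, ldc, 0, 0,
                    nullptr, 0); // no bias

    // Optional pretransposed-B interface: allocate and fill the packed buffer if required.
    std::vector<uint8_t> packed_B;
    if (gemm.B_pretranspose_required()) {
        packed_B.resize(gemm.get_B_pretransposed_array_size());
        gemm.pretranspose_B_array(packed_B.data(), B, ldb, 0 /* multi stride */);
    }

    // Optional working-space interface: the buffer must stay allocated across execute() calls.
    std::vector<uint8_t> working_space;
    if (gemm.get_working_size() > 0) {
        working_space.resize(gemm.get_working_size());
        gemm.set_working_space(working_space.data());
    }

    gemm.set_nthreads(1);

    // Cover the whole compute window with a single execute() call. The {start, size}
    // initializer-list form of ndcoord_t and get_size() are assumed from ndrange.hpp.
    const arm_gemm::ndrange_t window = gemm.get_window_size();
    arm_gemm::ndcoord_t work_range{{0, window.get_size(0)}, {0, window.get_size(1)},
                                   {0, window.get_size(2)}, {0, window.get_size(3)},
                                   {0, window.get_size(4)}, {0, window.get_size(5)}};
    arm_gemm::ndcoord_t thread_locator{{0, 1}, {0, 1}, {0, 1}, {0, 1}, {0, 1}, {0, 1}};
    gemm.execute(work_range, thread_locator, 0 /* threadid */);
}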