Compute Library 23.05
GemmHybrid< strategy, To, Tr > Class Template Reference

#include <gemm_hybrid.hpp>

Collaboration diagram for GemmHybrid< strategy, To, Tr > (diagram omitted)

Public Member Functions

 GemmHybrid (GemmHybrid &)=delete
 
GemmHybrid & operator= (GemmHybrid &)=delete
 
 GemmHybrid (const GemmArgs &args)
 
ndrange_t get_window_size () const override
 
bool supports_dynamic_scheduling () const override
 
void execute (const ndcoord_t &work_range, const ndcoord_t &, int) override
 Main execute member function. More...
 
bool B_is_pretransposed () const override
 
bool B_pretranspose_required () const override
 
size_t get_B_pretransposed_array_size () const override
 
void pretranspose_B_array (void *in_buffer, const To *B, const int ldb, const int B_multi_stride) override
 
void set_pretransposed_B_data (void *in_buffer) override
 
GemmConfig get_config () override
 
- Public Member Functions inherited from GemmCommon< To, Tr >
virtual void set_arrays (const To *A, const int lda, const int A_batch_stride, const int A_multi_stride, const To *B, const int ldb, const int B_multi_stride, Tr *C, const int ldc, const int C_batch_stride, const int C_multi_stride, const Tr *bias, const int bias_multi_stride)
 
void set_arrays_generic (const void *A, const int lda, const int A_batch_stride, const int A_multi_stride, const void *B, const int ldb, const int B_multi_stride, void *C, const int ldc, const int C_batch_stride, const int C_multi_stride, const void *bias, const int bias_multi_stride) override
 
virtual void requantize_bias (void *, const To *, const int, const int)
 
void pretranspose_B_array_generic (void *out, const void *in, const int row_stride, const int multi_stride) override
 
virtual void pretranspose_B_array_part (void *out, const To *in, const int row_stride, const int multi_stride, size_t, size_t)
 
void pretranspose_B_array_part_generic (void *out, const void *in, const int row_stride, const int multi_stride, size_t start, size_t end) override
 
virtual void set_indirect_parameters (size_t, const To *const *const *)
 
void set_indirect_parameters_generic (size_t sz, const void *const *const *ptr) override
 
- Public Member Functions inherited from IGemmCommon
virtual void set_nthreads (int)
 
virtual size_t get_working_size () const
 
virtual void set_working_space (void *)
 
virtual size_t get_B_pretranspose_window_size () const
 
virtual void set_quantized_bias (const int32_t *, size_t)
 
virtual void set_convolution_parameters (ConvolutionParameters)
 
virtual ~IGemmCommon ()
 

Static Public Member Functions

static uint64_t estimate_cycles (const GemmArgs &args, const PerformanceParameters &params)
 

Detailed Description

template<typename strategy, typename To, typename Tr>
class arm_gemm::GemmHybrid< strategy, To, Tr >

Definition at line 44 of file gemm_hybrid.hpp.
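
No brief description is attached to the class, but the members below outline its shape: a "hybrid" GEMM whose kernels read the A operand directly from the caller's array, while B must first be rearranged into a pretransposed panel buffer (B_is_pretransposed() returns true, and only B has a pretranspose path). The sketch below shows one plausible call order through the inherited GemmCommon / IGemmCommon interface; it is illustrative only, and the bare malloc plus the {start, size} initializer-list construction of ndcoord_t are assumptions made for the sketch, not library code.

    // Illustrative call order only, not library code. Assumes args has been
    // populated elsewhere, a single batch/multi, and no bias; the bare
    // malloc and the {start, size} initializer-list form of ndcoord_t are
    // assumptions made for this sketch.
    #include <cstdlib>

    template <typename strategy, typename To, typename Tr>
    void run_once(const arm_gemm::GemmArgs &args,
                  const To *A, int lda, const To *B, int ldb, Tr *C, int ldc) {
        arm_gemm::GemmHybrid<strategy, To, Tr> gemm(args);

        // 1. B must be pretransposed before the first execute().
        void *b_buf = std::malloc(gemm.get_B_pretransposed_array_size());
        gemm.pretranspose_B_array(b_buf, B, ldb, /*B_multi_stride=*/0);

        // 2. Point the operator at the operand and result arrays.
        gemm.set_arrays(A, lda, /*A_batch_stride=*/0, /*A_multi_stride=*/0,
                        B, ldb, /*B_multi_stride=*/0,
                        C, ldc, /*C_batch_stride=*/0, /*C_multi_stride=*/0,
                        /*bias=*/nullptr, /*bias_multi_stride=*/0);

        // 3. Run the whole 1-D window on the calling thread.
        arm_gemm::ndcoord_t all{{0u, gemm.get_window_size().total_size()}};
        gemm.execute(all, all, 0);

        std::free(b_buf);
    }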

Constructor & Destructor Documentation

◆ GemmHybrid() [1/2]

GemmHybrid ( GemmHybrid< strategy, To, Tr > &  )
delete

◆ GemmHybrid() [2/2]

GemmHybrid ( const GemmArgs &  args )
inline

Definition at line 130 of file gemm_hybrid.hpp.

    : _ci(args._ci), _Msize(args._Msize), _Nsize(args._Nsize), _Ksize(args._Ksize),
      _nbatches(args._nbatches), _nmulti(args._nmulti),
      _act(args._act),
      _k_block(compute_k_block(args)), _n_block(compute_n_block(args)),
      _Mround(roundup(args._Msize, strategy::out_height())),
      _window_range(iceildiv(args._Msize, strategy::out_height()), _nbatches, iceildiv(_Nsize, _n_block), _nmulti) { }
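
The initializer list leans on two small helpers from utils.hpp, roundup() and iceildiv(), whose definitions are only cross-referenced here. As a hedged sketch, they behave as the conventional round-up-to-multiple and ceiling-division utilities (bodies assumed, not quoted from utils.hpp):

    // Hedged sketch of the utils.hpp helpers used in the initializer list.
    // Signatures match the cross-references; the bodies are the conventional
    // definitions, assumed rather than copied from the file.
    template <typename T>
    inline T iceildiv(const T a, const T b) {
        return (a + b - 1) / b;              // iceildiv(10, 4) == 3
    }

    template <typename T>
    inline T roundup(const T a, const T b) {
        return iceildiv(a, b) * b;           // roundup(10, 4) == 12
    }

So _Mround is _Msize rounded up to a whole number of kernel output rows, and _window_range spans iceildiv(_Msize, out_height()) row blocks by _nbatches by iceildiv(_Nsize, _n_block) column blocks by _nmulti.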

Member Function Documentation

◆ B_is_pretransposed()

bool B_is_pretransposed ( ) const
inlineoverridevirtual

Reimplemented from IGemmCommon.

Definition at line 212 of file gemm_hybrid.hpp.

212  {
213  return true;
214  }

◆ B_pretranspose_required()

bool B_pretranspose_required ( ) const
inlineoverridevirtual

Reimplemented from IGemmCommon.

Definition at line 216 of file gemm_hybrid.hpp.

216  {
217  return (_B_transposed==nullptr);
218  }

◆ estimate_cycles()

static uint64_t estimate_cycles ( const GemmArgs &  args,
const PerformanceParameters &  params 
)
inlinestatic

Definition at line 253 of file gemm_hybrid.hpp.

References GemmArgs::_Ksize, GemmArgs::_Msize, GemmArgs::_nbatches, GemmArgs::_nmulti, GemmArgs::_Nsize, PerformanceParameters::kernel_macs_cycle, and arm_gemm::roundup().

{
    // Note: Current hybrid kernels don't actually round up height (they
    // have paths for each possible height). Might need to make this
    // configurable in future.
    uint64_t total_macs = static_cast<uint64_t>(args._nbatches) * args._nmulti * args._Msize * roundup(args._Nsize, strategy::out_width()) * roundup(args._Ksize, strategy::k_unroll());

    float mac_cycles = static_cast<float>(total_macs) / params.kernel_macs_cycle;

    // TODO: A bit of a kludge here: current hybrid kernels incur extra
    // overhead where the width is not a multiple of kernel width. It's
    // most noticeable where the overall width is quite low, so add 15%
    // penalty for such widths.
    if ((args._Nsize < strategy::out_width()) || (args._Nsize > strategy::out_width() && args._Nsize < 2*strategy::out_width())) {
        mac_cycles *= 1.15f;
    }

    uint64_t total_cycles = mac_cycles;

    return total_cycles;
}
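
To make the model concrete, here is a worked example with invented numbers: out_width() == 8, k_unroll() == 4 and kernel_macs_cycle == 16.0 are hypothetical placeholders, not values from any real strategy.

    // Illustrative only: plugs made-up numbers into the cost model above.
    // out_width = 8, k_unroll = 4 and kernel_macs_cycle = 16.0f are
    // hypothetical; real values come from the strategy and the measured
    // PerformanceParameters.
    #include <cstdint>
    #include <cstdio>

    int main() {
        uint64_t nbatches = 1, nmulti = 1, M = 100, N = 12, K = 60;
        uint64_t out_width = 8, k_unroll = 4;

        auto roundup = [](uint64_t a, uint64_t b) { return ((a + b - 1) / b) * b; };

        // N rounds up to 16; K is already a multiple of 4.
        uint64_t total_macs = nbatches * nmulti * M * roundup(N, out_width) * roundup(K, k_unroll);

        float mac_cycles = static_cast<float>(total_macs) / 16.0f;   // kernel_macs_cycle

        // N == 12 lies strictly between out_width and 2*out_width, so the
        // 15% narrow-width penalty applies: 6000 cycles becomes 6900.
        if ((N < out_width) || (N > out_width && N < 2 * out_width)) {
            mac_cycles *= 1.15f;
        }

        printf("estimated cycles: %llu\n", (unsigned long long)mac_cycles);
        return 0;
    }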

◆ execute()

void execute ( const ndcoord_t work_range,
const ndcoord_t thread_locator,
int  threadid 
)
inlineoverridevirtual

Main execute member fucntion.

Parameters
[in]  work_range      specifies the range of work we want to be computed, total range defined by get_window_size()
[in]  thread_locator  where we are inside the thread space
[in]  threadid        a unique thread id

Implements IGemmCommon.

Definition at line 149 of file gemm_hybrid.hpp.

References arm_gemm::bias_adder(), NDCoordinate< N >::get_position(), NDCoordinate< N >::get_position_end(), NDRange< D >::iterator(), arm_gemm::roundup(), and strategy.

{
#ifdef CYCLE_PROFILING
    profiler prof;
#endif
    strategy strat(_ci);

    /* Make sure we've been set up correctly. */
    assert(_B_transposed);
    static_assert(std::is_same<To, Toi>::value, "gemm_native: Operand types must be the same.");
    static_assert(std::is_same<Tr, Tri>::value, "gemm_native: Result types must be the same.");

    /* For now, each work item implies all the K for a given output
     * pixel (so we don't need to synchronize access to the output
     * array). So separate the loop over K blocks here. */
    for (unsigned int k0=0; k0<_Ksize; k0+=_k_block) {
        unsigned int kmax = std::min(k0 + _k_block, _Ksize);
        unsigned int kern_k = roundup(kmax-k0, strategy::k_unroll());

        const bool first_pass = (k0 == 0);
        const bool last_pass = (kmax == _Ksize);

        auto p = _window_range.iterator(work_range.get_position(0), work_range.get_position_end(0));

        if (p.done()) {
            return;
        }

        do {
            const unsigned int m_start = p.dim(0) * strategy::out_height();
            const unsigned int m_end = std::min(p.dim0_max() * strategy::out_height(), _Msize);
            const unsigned int batch = p.dim(1);
            const unsigned int n0 = p.dim(2) * _n_block;
            const unsigned int nmax = std::min(n0 + _n_block, _Nsize);
            const unsigned int multi = p.dim(3);

            const Toi *b_panel = _B_transposed +
                                 (multi * roundup(_Nsize, strategy::out_width()) * roundup(_Ksize, strategy::k_unroll())) +
                                 (k0 * roundup(_Nsize, strategy::out_width())) +
                                 (n0 * kern_k);

#ifdef CYCLE_PROFILING
            auto p = prof.ScopedProfiler(PROFILE_KERNEL, (unsigned long)(m_end - m_start) * kern_k * roundup(nmax-n0, strategy::out_width()));
#endif

            strat.kernel(this->_Aptr + (multi * this->_A_multi_stride) + (batch * this->_A_batch_stride) + (m_start * this->_lda) + k0, this->_lda,
                         b_panel,
                         this->_Cptr + (multi * this->_C_multi_stride) + (batch * this->_C_batch_stride) + (m_start * this->_ldc) + n0, this->_ldc,
                         (m_end - m_start), (nmax - n0), kmax-k0,
                         (strategy::supports_bias() && first_pass && this->_bias) ? this->_bias + (multi * this->_bias_multi_stride) + n0 : nullptr,
                         last_pass ? _act : Activation(), !first_pass);

            // Add bias externally if needed
            if (!strategy::supports_bias() && this->_bias && first_pass) {
                bias_adder(this->_Cptr + (multi * this->_C_multi_stride) + (batch * this->_C_batch_stride) + (m_start * this->_ldc) + n0, this->_ldc,
                           this->_bias + (multi * this->_bias_multi_stride) + n0,
                           (m_end - m_start), (nmax - n0));
            }

        } while (p.next_dim1());
    }
}
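
The b_panel addressing in the loop above is the least obvious step: the pretransposed buffer is laid out multi-major, then by K block, then by N panel, with N and K rounded up to kernel granularity. A standalone sketch of the same offset arithmetic follows; panel_offset() and roundup_to() are hypothetical names for illustration, not library functions.

    // Hedged sketch of the b_panel offset computation from execute() above.
    // The arithmetic mirrors the expression in the source; the helper names
    // are invented.
    #include <cstddef>

    template <typename T>
    static T roundup_to(T a, T b) { return ((a + b - 1) / b) * b; }

    // Offset (in elements) of the transposed-B panel for a given
    // (multi, k-block start k0, n-block start n0).
    size_t panel_offset(unsigned int multi, unsigned int k0, unsigned int n0,
                        unsigned int Nsize, unsigned int Ksize,
                        unsigned int out_width, unsigned int k_unroll,
                        unsigned int k_block) {
        unsigned int kmax   = (k0 + k_block < Ksize) ? k0 + k_block : Ksize;
        unsigned int kern_k = roundup_to(kmax - k0, k_unroll);

        return static_cast<size_t>(multi) * roundup_to(Nsize, out_width) * roundup_to(Ksize, k_unroll)  // whole multis
             + static_cast<size_t>(k0) * roundup_to(Nsize, out_width)                                   // completed K blocks
             + static_cast<size_t>(n0) * kern_k;                                                        // N panels in this K block
    }

The middle term can use k0 directly, rather than summing per-block rounded sizes, presumably because compute_k_block() keeps _k_block a multiple of k_unroll(), so every completed K block contributes exactly its nominal size.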

◆ get_B_pretransposed_array_size()

size_t get_B_pretransposed_array_size ( ) const
inlineoverridevirtual

Reimplemented from IGemmCommon.

Definition at line 220 of file gemm_hybrid.hpp.

References arm_gemm::roundup().

{
    return roundup(_Nsize, strategy::out_width()) * roundup(_Ksize, strategy::k_unroll()) * _nmulti * sizeof(Toi);
}
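
B_pretranspose_required() simply reports whether _B_transposed is still unset, so an already-filled buffer can be handed to another compatible instance via set_pretransposed_B_data() without repeating the transpose. A hedged sketch; buffer ownership and the requirement that both operators were built from identical GemmArgs are assumptions.

    // Hedged sketch: reuse one pretransposed-B buffer across two operators
    // assumed to be built from identical GemmArgs. Ownership and lifetime
    // are the caller's problem here; the library normally manages this
    // buffer as part of its own workspace handling.
    template <typename GemmType, typename To>
    void share_transposed_B(GemmType &first, GemmType &second,
                            void *b_buf, const To *B, int ldb, int B_multi_stride) {
        if (first.B_pretranspose_required()) {       // _B_transposed still nullptr
            first.pretranspose_B_array(b_buf, B, ldb, B_multi_stride);
        }
        // The second instance adopts the same buffer; afterwards its
        // B_pretranspose_required() also returns false.
        second.set_pretransposed_B_data(b_buf);
    }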

◆ get_config()

GemmConfig get_config ( )
inlineoverridevirtual

Implements IGemmCommon.

Definition at line 274 of file gemm_hybrid.hpp.

References GemmConfig::filter, arm_gemm::GEMM_HYBRID, GemmConfig::inner_block_size, GemmConfig::method, and GemmConfig::outer_block_size.

{
    GemmConfig c;

    c.method = GemmMethod::GEMM_HYBRID;
    c.inner_block_size = _k_block;
    c.outer_block_size = _n_block;
    c.filter = get_type_name<strategy>();

    return c;
}
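
get_config() is useful for logging which implementation and blocking were chosen. A hedged sketch follows; GemmConfig's field types are inferred from the assignments above, with filter assumed to be a std::string.

    // Hedged sketch: log the chosen GEMM configuration. Assumes
    // GemmConfig::filter is a std::string (inferred from the
    // get_type_name<strategy>() assignment) and that the block sizes
    // are integers.
    #include <cstdio>

    void log_gemm_config(arm_gemm::IGemmCommon &gemm) {
        arm_gemm::GemmConfig c = gemm.get_config();
        printf("filter=%s inner_block=%u outer_block=%u\n",
               c.filter.c_str(),
               (unsigned)c.inner_block_size,
               (unsigned)c.outer_block_size);
    }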

◆ get_window_size()

ndrange_t get_window_size ( ) const
inlineoverridevirtual

Returns
an ndrange containing ranges of the compute space which can be broken up and parallelised over

Implements IGemmCommon.

Definition at line 139 of file gemm_hybrid.hpp.

References NDRange< D >::total_size().

{
    return { _window_range.total_size() };
}
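
Since supports_dynamic_scheduling() returns true and the window here is one-dimensional, a caller can carve [0, total_size()) into per-thread slices. A hedged sketch follows; the {start, size} initializer-list construction of ndcoord_t is an assumption about ndrange.hpp, not something documented on this page.

    // Hedged sketch: statically split the 1-D scheduling window across
    // nthreads and run each slice through execute(). The {start, size}
    // initializer-list form of ndcoord_t is assumed; verify against
    // ndrange.hpp in your Compute Library version.
    void run_parallel_sketch(arm_gemm::IGemmCommon &gemm, int nthreads) {
        const unsigned int total = gemm.get_window_size().total_size();

        for (int t = 0; t < nthreads; t++) {   // in practice, one iteration per worker thread
            const unsigned int start = (total * t) / nthreads;
            const unsigned int end   = (total * (t + 1)) / nthreads;
            if (start == end) continue;        // more threads than work items

            arm_gemm::ndcoord_t work_range{{start, end - start}};
            // GemmHybrid ignores the thread_locator argument (it is unnamed
            // in the override), so the same coordinate is passed as a
            // placeholder.
            gemm.execute(work_range, work_range, t);
        }
    }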

◆ operator=()

GemmHybrid& operator= ( GemmHybrid< strategy, To, Tr > &  )
delete

◆ pretranspose_B_array()

void pretranspose_B_array ( void *  in_buffer,
const To *  B,
const int  ldb,
const int  B_multi_stride 
)
inlineoverridevirtual

Reimplemented from GemmCommon< To, Tr >.

Definition at line 224 of file gemm_hybrid.hpp.

References arm_gemm::roundup(), and strategy.

{
    Toi *buffer = reinterpret_cast<Toi *>(in_buffer);
    _B_transposed = buffer;
    strategy strat(_ci);

    for (unsigned int multi=0; multi<_nmulti; multi++) {
        for (unsigned int k0=0; k0<_Ksize; k0+=_k_block) {
            const unsigned int kmax = std::min(k0 + _k_block, _Ksize);
            const unsigned int k_size = roundup(kmax-k0, strategy::k_unroll());

            for (unsigned int x0=0; x0<_Nsize; x0+=_n_block) {
                const unsigned int xmax = std::min(x0+_n_block, _Nsize);

                const unsigned int size = roundup(xmax-x0, strategy::out_width()) * k_size;

                strat.transforms.PrepareB(buffer, B + (multi * B_multi_stride), ldb,
                                          x0, xmax, k0, kmax);

                buffer += size;
            }
        }
    }
}

◆ set_pretransposed_B_data()

void set_pretransposed_B_data ( void *  in_buffer)
inlineoverridevirtual

Reimplemented from IGemmCommon.

Definition at line 248 of file gemm_hybrid.hpp.

{
    _B_transposed = reinterpret_cast<Toi *>(in_buffer);
}

◆ supports_dynamic_scheduling()

bool supports_dynamic_scheduling ( ) const
inlineoverridevirtual

Reimplemented from IGemmCommon.

Definition at line 144 of file gemm_hybrid.hpp.

{
    return true;
}

The documentation for this class was generated from the following file:
gemm_hybrid.hpp