Compute Library
 20.08
NEGEMMLowpOffsetContributionOutputStageKernel.cpp
1 /*
2  * Copyright (c) 2019-2020 Arm Limited.
3  *
4  * SPDX-License-Identifier: MIT
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a copy
7  * of this software and associated documentation files (the "Software"), to
8  * deal in the Software without restriction, including without limitation the
9  * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10  * sell copies of the Software, and to permit persons to whom the Software is
11  * furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in all
14  * copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22  * SOFTWARE.
23  */
25 
27 #include "arm_compute/core/Error.h"
33 #include "arm_compute/core/Types.h"
34 #include "arm_compute/core/Utils.h"
37 
38 #include <arm_neon.h>
39 #include <cstddef>
40 #include <cstdint>
41 #include <map>
42 
43 namespace arm_compute
44 {
45 class Coordinates;
46 
47 namespace
48 {
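// Helper routines below operate on blocks of 16 S32 accumulators at a time (int32x4x4_t,
// i.e. four 128-bit NEON registers): block loads and element-wise additions/multiplications
// used to apply the offset contributions and the output stage.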
49 inline int32x4x4_t load_results_input(const Iterator &mm_result_it, int32_t x)
50 {
51  return
52  {
53  {
54  vld1q_s32(reinterpret_cast<const int32_t *>(mm_result_it.ptr()) + x + 0),
55  vld1q_s32(reinterpret_cast<const int32_t *>(mm_result_it.ptr()) + x + 4),
56  vld1q_s32(reinterpret_cast<const int32_t *>(mm_result_it.ptr()) + x + 8),
57  vld1q_s32(reinterpret_cast<const int32_t *>(mm_result_it.ptr()) + x + 12)
58  }
59  };
60 }
61 
62 inline int32x4x4_t load(const int32_t *ptr, int32_t x)
63 {
64  return
65  {
66  {
67  vld1q_s32(ptr + x + 0),
68  vld1q_s32(ptr + x + 4),
69  vld1q_s32(ptr + x + 8),
70  vld1q_s32(ptr + x + 12)
71  }
72  };
73 }
74 
75 inline int32x4x4_t add_s32(int32x4x4_t a, int32x4_t b)
76 {
77  return
78  {
79  {
80  vaddq_s32(a.val[0], b),
81  vaddq_s32(a.val[1], b),
82  vaddq_s32(a.val[2], b),
83  vaddq_s32(a.val[3], b)
84  }
85  };
86 }
87 
88 inline int32x4x4_t add_s32(int32x4x4_t a, int32x4x4_t b)
89 {
90  return
91  {
92  {
93  vaddq_s32(a.val[0], b.val[0]),
94  vaddq_s32(a.val[1], b.val[1]),
95  vaddq_s32(a.val[2], b.val[2]),
96  vaddq_s32(a.val[3], b.val[3])
97  }
98  };
99 }
100 
101 inline int32x4x4_t mul_s32(int32x4x4_t &a, int32_t mul_scalar)
102 {
103  return
104  {
105  {
106  vmulq_n_s32(a.val[0], mul_scalar),
107  vmulq_n_s32(a.val[1], mul_scalar),
108  vmulq_n_s32(a.val[2], mul_scalar),
109  vmulq_n_s32(a.val[3], mul_scalar)
110  }
111  };
112 }
113 
114 inline int32x4x4_t mul_s32(int32x4x4_t &a, const int32_t *multiplier)
115 {
116  return
117  {
118  {
119  vmulq_s32(a.val[0], vld1q_s32(multiplier)),
120  vmulq_s32(a.val[1], vld1q_s32(multiplier + 4)),
121  vmulq_s32(a.val[2], vld1q_s32(multiplier + 8)),
122  vmulq_s32(a.val[3], vld1q_s32(multiplier + 12))
123  }
124  };
125 }
126 
127 inline int32x4x4_t get_a_offset(const int32_t *vector_sum_col_ptr, int32_t a_offset, int32_t x)
128 {
129  int32x4x4_t a_offset_term_s32 = load(vector_sum_col_ptr, x);
130 
131  a_offset_term_s32.val[0] = vmulq_n_s32(a_offset_term_s32.val[0], a_offset);
132  a_offset_term_s32.val[1] = vmulq_n_s32(a_offset_term_s32.val[1], a_offset);
133  a_offset_term_s32.val[2] = vmulq_n_s32(a_offset_term_s32.val[2], a_offset);
134  a_offset_term_s32.val[3] = vmulq_n_s32(a_offset_term_s32.val[3], a_offset);
135  return a_offset_term_s32;
136 }
137 
138 inline int32x4_t get_b_offset(const int32_t *vector_sum_row_ptr, int32_t b_offset)
139 {
140  int32x4_t b_offset_term_s32 = vld1q_dup_s32(vector_sum_row_ptr);
141  b_offset_term_s32 = vmulq_n_s32(b_offset_term_s32, b_offset);
142  return b_offset_term_s32;
143 }
144 
145 inline int32x4x4_t get_k_offset(int32_t k_offset)
146 {
147  return
148  {
149  {
150  vdupq_n_s32(k_offset),
151  vdupq_n_s32(k_offset),
152  vdupq_n_s32(k_offset),
153  vdupq_n_s32(k_offset)
154  }
155  };
156 }
157 
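// Finalizes the non fixed-point (QUANTIZE_DOWN) output stage on 16 elements: the accumulators
// have already been multiplied by the result multiplier, so here they are shifted (a left shift
// by a negative amount acts as an arithmetic right shift), clamped to be non-negative, and
// saturate-narrowed S32 -> S16 -> 8-bit, with an optional bounded-ReLU clamp to [min, max].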
158 inline uint8x16_t finalize_quantization_floating_point(int32x4x4_t &in_s32, int32x4_t result_shift_s32, uint8x16_t min_u8, uint8x16_t max_u8, bool is_bounded_relu)
159 {
160  const static int32x4_t zero_s32 = vdupq_n_s32(0);
161 
162  // Shift final result (negative value shift right)
163  in_s32.val[0] = vshlq_s32(in_s32.val[0], result_shift_s32);
164  in_s32.val[1] = vshlq_s32(in_s32.val[1], result_shift_s32);
165  in_s32.val[2] = vshlq_s32(in_s32.val[2], result_shift_s32);
166  in_s32.val[3] = vshlq_s32(in_s32.val[3], result_shift_s32);
167 
168  // Saturate negative values
169  in_s32.val[0] = vmaxq_s32(in_s32.val[0], zero_s32);
170  in_s32.val[1] = vmaxq_s32(in_s32.val[1], zero_s32);
171  in_s32.val[2] = vmaxq_s32(in_s32.val[2], zero_s32);
172  in_s32.val[3] = vmaxq_s32(in_s32.val[3], zero_s32);
173 
174  // Convert S32 to S16
175  const int16x8x2_t in_s16 =
176  {
177  {
178  vcombine_s16(vqmovn_s32(in_s32.val[0]), vqmovn_s32(in_s32.val[1])),
179  vcombine_s16(vqmovn_s32(in_s32.val[2]), vqmovn_s32(in_s32.val[3]))
180  }
181  };
182 
183  // Convert S16 to U8
184  uint8x16_t out_u8 = vcombine_u8(vqmovun_s16(in_s16.val[0]), vqmovun_s16(in_s16.val[1]));
185 
186  if(is_bounded_relu)
187  {
188  out_u8 = vmaxq_u8(out_u8, min_u8);
189  out_u8 = vminq_u8(out_u8, max_u8);
190  }
191 
192  return out_u8;
193 }
194 
195 inline int8x16_t finalize_quantization_floating_point(int32x4x4_t &in_s32, int32x4_t result_shift_s32, int8x16_t min_s8, int8x16_t max_s8, bool is_bounded_relu)
196 {
197  const static int32x4_t zero_s32 = vdupq_n_s32(0);
198 
199  // Shift final result (negative value shift right)
200  in_s32.val[0] = vshlq_s32(in_s32.val[0], result_shift_s32);
201  in_s32.val[1] = vshlq_s32(in_s32.val[1], result_shift_s32);
202  in_s32.val[2] = vshlq_s32(in_s32.val[2], result_shift_s32);
203  in_s32.val[3] = vshlq_s32(in_s32.val[3], result_shift_s32);
204 
205  // Saturate negative values
206  in_s32.val[0] = vmaxq_s32(in_s32.val[0], zero_s32);
207  in_s32.val[1] = vmaxq_s32(in_s32.val[1], zero_s32);
208  in_s32.val[2] = vmaxq_s32(in_s32.val[2], zero_s32);
209  in_s32.val[3] = vmaxq_s32(in_s32.val[3], zero_s32);
210 
211  // Convert S32 to S16
212  const int16x8x2_t in_s16 =
213  {
214  {
215  vcombine_s16(vqmovn_s32(in_s32.val[0]), vqmovn_s32(in_s32.val[1])),
216  vcombine_s16(vqmovn_s32(in_s32.val[2]), vqmovn_s32(in_s32.val[3]))
217  }
218  };
219 
220  // Convert S16 to S8
221  int8x16_t out_s8 = vcombine_s8(vqmovn_s16(in_s16.val[0]), vqmovn_s16(in_s16.val[1]));
222 
223  if(is_bounded_relu)
224  {
225  out_s8 = vmaxq_s8(out_s8, min_s8);
226  out_s8 = vminq_s8(out_s8, max_s8);
227  }
228 
229  return out_s8;
230 }
231 
232 inline int8x16_t finalize_quantization_floating_point(int32x4x4_t &in_s32, int32x4x4_t result_shift_s32, int8x16_t min_s8, int8x16_t max_s8, bool is_bounded_relu)
233 {
234  const static int32x4_t zero_s32 = vdupq_n_s32(0);
235 
236  // Shift final result (negative value shift right)
237  in_s32.val[0] = vshlq_s32(in_s32.val[0], vnegq_s32(result_shift_s32.val[0]));
238  in_s32.val[1] = vshlq_s32(in_s32.val[1], vnegq_s32(result_shift_s32.val[1]));
239  in_s32.val[2] = vshlq_s32(in_s32.val[2], vnegq_s32(result_shift_s32.val[2]));
240  in_s32.val[3] = vshlq_s32(in_s32.val[3], vnegq_s32(result_shift_s32.val[3]));
241 
242  // Saturate negative values
243  in_s32.val[0] = vmaxq_s32(in_s32.val[0], zero_s32);
244  in_s32.val[1] = vmaxq_s32(in_s32.val[1], zero_s32);
245  in_s32.val[2] = vmaxq_s32(in_s32.val[2], zero_s32);
246  in_s32.val[3] = vmaxq_s32(in_s32.val[3], zero_s32);
247 
248  // Convert S32 to S16
249  const int16x8x2_t in_s16 =
250  {
251  {
252  vcombine_s16(vqmovn_s32(in_s32.val[0]), vqmovn_s32(in_s32.val[1])),
253  vcombine_s16(vqmovn_s32(in_s32.val[2]), vqmovn_s32(in_s32.val[3]))
254  }
255  };
256 
257  // Convert S16 to S8
258  int8x16_t out_s8 = vcombine_s8(vqmovn_s16(in_s16.val[0]), vqmovn_s16(in_s16.val[1]));
259 
260  if(is_bounded_relu)
261  {
262  out_s8 = vmaxq_s8(out_s8, min_s8);
263  out_s8 = vminq_s8(out_s8, max_s8);
264  }
265 
266  return out_s8;
267 }
268 
269 template <typename T>
270 struct VectorTyper
271 {
272  using stype = T;
273  using vtype = typename wrapper::traits::neon_bitvector_t<T, wrapper::traits::BitWidth::W128>;
274 };
275 
276 inline Window get_win_vector_sum(const Window &window)
277 {
278  Window win_vector_sum(window);
279  win_vector_sum.set(Window::DimY, Window::Dimension(0, 0, 0));
280  win_vector_sum.set(Window::DimZ, Window::Dimension(0, 0, 0));
281  return win_vector_sum;
282 }
283 
284 inline Iterator get_vector_sum_col_it(const Window &window, const ITensor *vector_sum_col)
285 {
286  Iterator vector_sum_col_it(vector_sum_col, get_win_vector_sum(window));
287  return vector_sum_col_it;
288 }
289 
290 inline Iterator get_vector_sum_row_it(const Window &window, const ITensor *vector_sum_row)
291 {
292  Window win_vector_sum_row = get_win_vector_sum(window);
293  win_vector_sum_row.set(Window::DimX, Window::Dimension(0, 0, 0));
294  Iterator vector_sum_row_it(vector_sum_row, win_vector_sum_row);
295  return vector_sum_row_it;
296 }
297 
298 inline Iterator get_bias_it(const Window &window, const ITensor *bias)
299 {
300  Window win_bias(window);
301  win_bias.set(Window::DimY, Window::Dimension(0, 1, 1));
302  win_bias.set(Window::DimZ, Window::Dimension(0, 1, 1));
303  Iterator bias_it(bias, win_bias);
304  return bias_it;
305 }
306 
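// Processes one row of the execution window: on top of the raw S32 GEMM accumulators it adds
// the offset contributions (a_offset * vector_sum_col[x], b_offset * vector_sum_row, and
// k_offset = a_offset * b_offset * k when both offsets are present), the optional bias, and
// then applies the output stage (fixed-point requantization or integer multiply + shift) to
// produce the 8-bit output. The main loop handles 16 elements per iteration; a scalar loop
// handles the left-over elements.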
307 template <typename VT>
308 inline void run_offset_contribution_output_stage_window(const int32_t *vector_sum_col_ptr, const int32_t *vector_sum_row_ptr, const int32_t *bias_ptr, Iterator mm_result_it, Iterator out_it,
309  const int32x4_t result_offset_s32, const int32x4_t result_shift_s32,
310  typename VT::vtype min_vec, typename VT::vtype max_vec,
311  int32_t a_offset, int32_t b_offset, int32_t k_offset,
312  int32_t multiplier, int32_t shift, int32_t offset, int32_t min_bound, int32_t max_bound,
313  int window_step_x, int window_start_x, int window_end_x, bool has_a_offset, bool has_b_offset, bool has_bias, bool is_bounded_relu, bool is_fixed_point)
314 {
315  int32x4x4_t offset_term_s32 = { 0, 0, 0, 0 };
316  if(!is_fixed_point)
317  {
318  // Combine quantization offset with other offsets.
319  offset_term_s32 = add_s32(offset_term_s32, result_offset_s32);
320  }
321  if(has_a_offset && has_b_offset)
322  {
323  offset_term_s32 = add_s32(offset_term_s32, get_k_offset(k_offset));
324  }
325  if(has_b_offset)
326  {
327  offset_term_s32 = add_s32(offset_term_s32, get_b_offset(vector_sum_row_ptr, b_offset));
328  }
329 
330  int x = window_start_x;
331  for(; x <= (window_end_x - window_step_x); x += window_step_x)
332  {
333  int32x4x4_t in_s32 = load_results_input(mm_result_it, x);
334 
335  if(has_a_offset)
336  {
337  in_s32 = add_s32(in_s32, get_a_offset(vector_sum_col_ptr, a_offset, x));
338  }
339  if(has_bias)
340  {
341  in_s32 = add_s32(in_s32, load(bias_ptr, x));
342  }
343  if(!is_fixed_point || has_b_offset)
344  {
345  in_s32 = add_s32(in_s32, offset_term_s32);
346  }
347  if(!is_fixed_point)
348  {
349  in_s32 = mul_s32(in_s32, multiplier);
350  }
351 
352  if(is_fixed_point)
353  {
354  wrapper::vstore(reinterpret_cast<typename VT::stype *>(out_it.ptr() + x),
355  finalize_quantization(in_s32, multiplier, shift, result_offset_s32, min_vec, max_vec, is_bounded_relu));
356  }
357  else
358  {
359  wrapper::vstore(reinterpret_cast<typename VT::stype *>(out_it.ptr() + x),
360  finalize_quantization_floating_point(in_s32, result_shift_s32, min_vec, max_vec, is_bounded_relu));
361  }
362  }
363  // Compute left-over elements
364  for(; x < window_end_x; ++x)
365  {
366  int32_t in_value = *(reinterpret_cast<const int32_t *>(mm_result_it.ptr()) + x) + wrapper::vgetlane(offset_term_s32.val[0], 0);
367 
368  if(has_a_offset)
369  {
370  in_value += (*(vector_sum_col_ptr + x) * a_offset);
371  }
372  if(has_bias)
373  {
374  in_value += *(bias_ptr + x);
375  }
376 
377  if(is_fixed_point)
378  {
379  // Finalize and store the result
380  *reinterpret_cast<typename VT::stype *>(out_it.ptr() + x) = finalize_quantization(in_value, multiplier, shift, offset,
381  static_cast<typename VT::stype>(min_bound),
382  static_cast<typename VT::stype>(max_bound), is_bounded_relu);
383  }
384  else
385  {
386  // Finalize quantization
387  in_value = (in_value * multiplier) >> shift;
388 
389  // Bound and store the result
390  if(is_bounded_relu)
391  {
392  in_value = static_cast<typename VT::stype>(std::max<int32_t>(min_bound, std::min<int32_t>(max_bound, in_value)));
393  }
394  *reinterpret_cast<typename VT::stype *>(out_it.ptr() + x) = static_cast<typename VT::stype>(std::max<int32_t>(static_cast<int32_t>(std::numeric_limits<typename VT::stype>::lowest()),
395  std::min<int32_t>(static_cast<int32_t>(std::numeric_limits<typename VT::stype>::max()), in_value)));
396  }
397  }
398 }
399 
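// Per-channel (symmetric weights) variant of the window routine above: each output channel x
// has its own result multiplier and shift (result_multipliers[x] / result_shifts[x]) and the
// output is always signed 8-bit; only the a_offset contribution applies.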
400 inline void run_offset_contribution_output_stage_window_symm(const int32_t *vector_sum_col_ptr, const int32_t *bias_ptr, Iterator mm_result_it, Iterator out_it,
401  const int32_t *result_multipliers, const int32_t *result_shifts,
402  const int32x4_t result_offset, int8x16_t min_s8, int8x16_t max_s8,
403  int32_t a_offset, int32_t offset, int32_t min_bound, int32_t max_bound,
404  int window_step_x, int window_start_x, int window_end_x, bool has_a_offset, bool has_bias, bool is_bounded_relu, bool is_fixed_point)
405 {
406  int32x4x4_t offset_term_s32 = { 0, 0, 0, 0 };
407  if(!is_fixed_point)
408  {
409  // Combine quantization offset with other offsets.
410  offset_term_s32 = add_s32(offset_term_s32, result_offset);
411  }
412 
413  int x = window_start_x;
414  for(; x <= (window_end_x - window_step_x); x += window_step_x)
415  {
416  int32x4x4_t in_s32 = load_results_input(mm_result_it, x);
417 
418  if(has_a_offset)
419  {
420  in_s32 = add_s32(in_s32, get_a_offset(vector_sum_col_ptr, a_offset, x));
421  }
422  if(has_bias)
423  {
424  in_s32 = add_s32(in_s32, load(bias_ptr, x));
425  }
426  if(!is_fixed_point)
427  {
428  in_s32 = add_s32(in_s32, offset_term_s32);
429  in_s32 = mul_s32(in_s32, result_multipliers + x);
430  }
431 
432  if(is_fixed_point)
433  {
434  vst1q_s8(reinterpret_cast<int8_t *>(out_it.ptr() + x), finalize_quantization_symm(in_s32, load(result_multipliers, x), load(result_shifts, x), result_offset, min_s8, max_s8, is_bounded_relu));
435  }
436  else
437  {
438  vst1q_s8(reinterpret_cast<int8_t *>(out_it.ptr() + x), finalize_quantization_floating_point(in_s32, load(result_shifts, x), min_s8, max_s8, is_bounded_relu));
439  }
440  }
441  // Compute left-over elements
442  for(; x < window_end_x; ++x)
443  {
444  int32_t in_value = *(reinterpret_cast<const int32_t *>(mm_result_it.ptr()) + x) + wrapper::vgetlane(offset_term_s32.val[0], 0);
445 
446  if(has_a_offset)
447  {
448  in_value += (*(vector_sum_col_ptr + x) * a_offset);
449  }
450  if(has_bias)
451  {
452  in_value += *(bias_ptr + x);
453  }
454 
455  if(is_fixed_point)
456  {
457  // Finalize and store the result
458  *(out_it.ptr() + x) = finalize_quantization(in_value, result_multipliers[x], result_shifts[x], offset, static_cast<int8_t>(min_bound), static_cast<int8_t>(max_bound), is_bounded_relu);
459  }
460  else
461  {
462  // Finalize quantization
463  in_value = (in_value * result_multipliers[x]) >> (-result_shifts[x]);
464 
465  // Bound and store the result
466  if(is_bounded_relu)
467  {
468  in_value = static_cast<int8_t>(std::max<int32_t>(min_bound, std::min<int32_t>(max_bound, in_value)));
469  }
470  *(out_it.ptr() + x) = static_cast<int8_t>(std::max<int32_t>(-128, std::min<int32_t>(127, in_value)));
471  }
472  }
473 }
474 
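// Iterates the collapsed execution window and calls run_offset_contribution_output_stage_window
// with the combination of flags matching the presence of a_offset, b_offset and bias, so that
// unused terms are skipped inside the inner loops.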
475 template <typename T>
476 void run_offset_contribution_output_stage(const Window &window,
477  const ITensor *mm_result, const ITensor *vector_sum_col, const ITensor *vector_sum_row, const ITensor *bias, ITensor *output,
478  int32_t a_offset, int32_t b_offset, int32_t k_offset, bool slide_vector_sum_col,
479  GEMMLowpOutputStageInfo output_stage, bool is_gemm3d, bool is_bounded_relu, bool is_fixed_point)
480 {
481  using ExactTagType = typename wrapper::traits::neon_bitvector_tag_t<T, wrapper::traits::BitWidth::W128>;
482  using Typer = VectorTyper<T>;
483 
484  const int height_input = is_gemm3d ? mm_result->info()->dimension(1) : 0;
485  const int depth_input = is_gemm3d ? mm_result->info()->dimension(2) : 1;
486 
487  const int32_t multiplier = output_stage.gemmlowp_multiplier;
488  const int32_t shift = output_stage.gemmlowp_shift;
489  const int32_t offset = output_stage.gemmlowp_offset;
490  const int32_t min_bound = output_stage.gemmlowp_min_bound;
491  const int32_t max_bound = output_stage.gemmlowp_max_bound;
492 
493  const int32x4_t result_offset_s32 = vdupq_n_s32(offset);
494  const int32x4_t result_shift_s32 = vdupq_n_s32(is_fixed_point ? shift : -shift);
495  const auto min_vec = wrapper::vdup_n(static_cast<T>(min_bound), ExactTagType{});
496  const auto max_vec = wrapper::vdup_n(static_cast<T>(max_bound), ExactTagType{});
497 
498  const int window_step_x = 16;
499  const auto window_start_x = static_cast<int>(window.x().start());
500  const auto window_end_x = static_cast<int>(window.x().end());
501 
502  Window win(window);
503  win.set(Window::DimX, Window::Dimension(0, 1, 1));
504 
505  Window collapsed_window = win.collapse_if_possible(win, Window::DimZ);
506 
507  Iterator mm_result_it(mm_result, win);
508  Iterator out_it(output, win);
509 
510  if((a_offset != 0) && (b_offset != 0))
511  {
512  ARM_COMPUTE_ERROR_ON_NULLPTR(vector_sum_col);
513  ARM_COMPUTE_ERROR_ON_NULLPTR(vector_sum_row);
514 
515  Iterator vector_sum_col_it = get_vector_sum_col_it(collapsed_window, vector_sum_col);
516  Iterator vector_sum_row_it = get_vector_sum_row_it(collapsed_window, vector_sum_row);
517 
518  const size_t sum_row_stride_y = vector_sum_row->info()->strides_in_bytes().y();
519 
520  // Offset in case vector_sum_col is batched
521  const int vector_sum_col_batch_offset = slide_vector_sum_col ? vector_sum_col->info()->strides_in_bytes().z() : 0;
522 
523  if(bias != nullptr)
524  {
525  Iterator bias_it = get_bias_it(collapsed_window, bias);
526  execute_window_loop(collapsed_window, [&](const Coordinates & id)
527  {
528  const int batch_id = id.z() / depth_input;
529  const auto vector_sum_col_ptr = reinterpret_cast<const int32_t *>(vector_sum_col_it.ptr() + batch_id * vector_sum_col_batch_offset);
530  const auto vector_sum_row_ptr = reinterpret_cast<const int32_t *>(vector_sum_row_it.ptr() + batch_id * sum_row_stride_y)
531  + id.y() + (id.z() % depth_input) * height_input;
532  run_offset_contribution_output_stage_window<Typer>(vector_sum_col_ptr, vector_sum_row_ptr, reinterpret_cast<const int32_t *>(bias_it.ptr()),
533  mm_result_it,
534  out_it,
535  result_offset_s32, result_shift_s32,
536  min_vec, max_vec, a_offset, b_offset, k_offset,
537  multiplier, shift, offset, min_bound, max_bound,
538  window_step_x, window_start_x, window_end_x, true, true, true, is_bounded_relu, is_fixed_point);
539  },
540  vector_sum_col_it, vector_sum_row_it, bias_it, mm_result_it, out_it);
541  }
542  else
543  {
544  execute_window_loop(collapsed_window, [&](const Coordinates & id)
545  {
546  const int batch_id = id.z() / depth_input;
547  const auto vector_sum_col_ptr = reinterpret_cast<const int32_t *>(vector_sum_col_it.ptr() + batch_id * vector_sum_col_batch_offset);
548  const auto vector_sum_row_ptr = reinterpret_cast<const int32_t *>(vector_sum_row_it.ptr() + batch_id * sum_row_stride_y)
549  + id.y() + (id.z() % depth_input) * height_input;
550  run_offset_contribution_output_stage_window<Typer>(vector_sum_col_ptr, vector_sum_row_ptr, nullptr, mm_result_it, out_it,
551  result_offset_s32, result_shift_s32,
552  min_vec, max_vec, a_offset, b_offset, k_offset,
553  multiplier, shift, offset, min_bound, max_bound,
554  window_step_x, window_start_x, window_end_x, true, true, false, is_bounded_relu, is_fixed_point);
555  },
556  vector_sum_col_it, vector_sum_row_it, mm_result_it, out_it);
557  }
558  }
559  else if((a_offset == 0) && (b_offset != 0))
560  {
561  ARM_COMPUTE_ERROR_ON_NULLPTR(vector_sum_row);
562 
563  Iterator vector_sum_row_it = get_vector_sum_row_it(collapsed_window, vector_sum_row);
564 
565  const size_t sum_row_stride_y = vector_sum_row->info()->strides_in_bytes().y();
566 
567  if(bias != nullptr)
568  {
569  Iterator bias_it = get_bias_it(collapsed_window, bias);
570  execute_window_loop(collapsed_window, [&](const Coordinates & id)
571  {
572  const int batch_id = id.z() / depth_input;
573  const auto vector_sum_row_ptr = reinterpret_cast<const int32_t *>(vector_sum_row_it.ptr() + batch_id * sum_row_stride_y)
574  + id.y() + (id.z() % depth_input) * height_input;
575  run_offset_contribution_output_stage_window<Typer>(nullptr, vector_sum_row_ptr, reinterpret_cast<const int32_t *>(bias_it.ptr()), mm_result_it,
576  out_it,
577  result_offset_s32, result_shift_s32,
578  min_vec, max_vec, a_offset, b_offset, k_offset,
579  multiplier, shift, offset, min_bound, max_bound,
580  window_step_x, window_start_x, window_end_x, false, true, true, is_bounded_relu, is_fixed_point);
581  },
582  vector_sum_row_it, bias_it, mm_result_it, out_it);
583  }
584  else
585  {
586  execute_window_loop(collapsed_window, [&](const Coordinates & id)
587  {
588  const int batch_id = id.z() / depth_input;
589  const auto vector_sum_row_ptr = reinterpret_cast<const int32_t *>(vector_sum_row_it.ptr() + batch_id * sum_row_stride_y)
590  + id.y() + (id.z() % depth_input) * height_input;
591  run_offset_contribution_output_stage_window<Typer>(nullptr, vector_sum_row_ptr, nullptr, mm_result_it, out_it,
592  result_offset_s32, result_shift_s32,
593  min_vec, max_vec, a_offset, b_offset, k_offset,
594  multiplier, shift, offset, min_bound, max_bound,
595  window_step_x, window_start_x, window_end_x, false, true, false, is_bounded_relu, is_fixed_point);
596  },
597  vector_sum_row_it, mm_result_it, out_it);
598  }
599  }
600  else if((a_offset != 0) && (b_offset == 0))
601  {
602  ARM_COMPUTE_ERROR_ON_NULLPTR(vector_sum_col);
603 
604  Iterator vector_sum_col_it = get_vector_sum_col_it(collapsed_window, vector_sum_col);
605 
606  // Offset in case vector_sum_col is batched
607  const int vector_sum_col_batch_offset = slide_vector_sum_col ? vector_sum_col->info()->strides_in_bytes().z() : 0;
608 
609  if(bias != nullptr)
610  {
611  Iterator bias_it = get_bias_it(collapsed_window, bias);
612  execute_window_loop(collapsed_window, [&](const Coordinates & id)
613  {
614  const int batch_id = id.z() / depth_input;
615  const auto vector_sum_col_ptr = reinterpret_cast<const int32_t *>(vector_sum_col_it.ptr() + batch_id * vector_sum_col_batch_offset);
616  run_offset_contribution_output_stage_window<Typer>(vector_sum_col_ptr, nullptr, reinterpret_cast<const int32_t *>(bias_it.ptr()), mm_result_it,
617  out_it,
618  result_offset_s32, result_shift_s32,
619  min_vec, max_vec, a_offset, b_offset, k_offset,
620  multiplier, shift, offset, min_bound, max_bound,
621  window_step_x, window_start_x, window_end_x, true, false, true, is_bounded_relu, is_fixed_point);
622  },
623  vector_sum_col_it, bias_it, mm_result_it, out_it);
624  }
625  else
626  {
627  execute_window_loop(collapsed_window, [&](const Coordinates & id)
628  {
629  const int batch_id = id.z() / depth_input;
630  const auto vector_sum_col_ptr = reinterpret_cast<const int32_t *>(vector_sum_col_it.ptr() + batch_id * vector_sum_col_batch_offset);
631  run_offset_contribution_output_stage_window<Typer>(vector_sum_col_ptr, nullptr, nullptr, mm_result_it, out_it,
632  result_offset_s32, result_shift_s32,
633  min_vec, max_vec, a_offset, b_offset, k_offset,
634  multiplier, shift, offset, min_bound, max_bound,
635  window_step_x, window_start_x, window_end_x, true, false, false, is_bounded_relu, is_fixed_point);
636  },
637  vector_sum_col_it, mm_result_it, out_it);
638  }
639  }
640  else
641  {
642  if(bias != nullptr)
643  {
644  Iterator bias_it = get_bias_it(collapsed_window, bias);
645  execute_window_loop(collapsed_window, [&](const Coordinates &)
646  {
647  run_offset_contribution_output_stage_window<Typer>(nullptr, nullptr, reinterpret_cast<const int32_t *>(bias_it.ptr()), mm_result_it, out_it,
648  result_offset_s32, result_shift_s32,
649  min_vec, max_vec, a_offset, b_offset, k_offset,
650  multiplier, shift, offset, min_bound, max_bound,
651  window_step_x, window_start_x, window_end_x, false, false, true, is_bounded_relu, is_fixed_point);
652  },
653  bias_it, mm_result_it, out_it);
654  }
655  else
656  {
657  execute_window_loop(collapsed_window, [&](const Coordinates &)
658  {
659  run_offset_contribution_output_stage_window<Typer>(nullptr, nullptr, nullptr, mm_result_it, out_it,
660  result_offset_s32, result_shift_s32,
661  min_vec, max_vec, a_offset, b_offset, k_offset,
662  multiplier, shift, offset, min_bound, max_bound,
663  window_step_x, window_start_x, window_end_x, false, false, false, is_bounded_relu, is_fixed_point);
664  },
665  mm_result_it, out_it);
666  }
667  return;
668  }
669 }
670 
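// Dispatcher for the symmetric per-channel case: b_offset (and therefore k_offset) is zero for
// symmetric weights, so only the a_offset contribution and the optional bias are handled.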
671 void run_offset_contribution_output_stage_symm(const Window &window,
672  const ITensor *mm_result, const ITensor *vector_sum_col, const ITensor *vector_sum_row, const ITensor *bias, ITensor *output,
673  int32_t a_offset, int32_t b_offset, int32_t k_offset, bool slide_vector_sum_col,
674  GEMMLowpOutputStageInfo output_stage, bool is_gemm3d, bool is_bounded_relu, bool is_fixed_point)
675 {
676  ARM_COMPUTE_UNUSED(vector_sum_row, b_offset, k_offset);
677 
678  const int depth_input = is_gemm3d ? mm_result->info()->dimension(2) : 1;
679 
680  const int32_t offset = output_stage.gemmlowp_offset;
681  const int32_t min_bound = output_stage.gemmlowp_min_bound;
682  const int32_t max_bound = output_stage.gemmlowp_max_bound;
683 
684  const int32_t *result_multipliers = output_stage.gemmlowp_multipliers.data();
685  const int32_t *result_shifts = output_stage.gemmlowp_shifts.data();
686  const int32x4_t result_offset_s32 = vdupq_n_s32(offset);
687  const int8x16_t min_s8 = vdupq_n_s8(static_cast<int8_t>(min_bound));
688  const int8x16_t max_s8 = vdupq_n_s8(static_cast<int8_t>(max_bound));
689 
690  const int window_step_x = 16;
691  const auto window_start_x = static_cast<int>(window.x().start());
692  const auto window_end_x = static_cast<int>(window.x().end());
693 
694  Window win(window);
695  win.set(Window::DimX, Window::Dimension(0, 1, 1));
696 
697  Window collapsed_window = win.collapse_if_possible(win, Window::DimZ);
698 
699  Iterator mm_result_it(mm_result, win);
700  Iterator out_it(output, win);
701 
702  if(a_offset != 0)
703  {
704  ARM_COMPUTE_ERROR_ON_NULLPTR(vector_sum_col);
705 
706  Iterator vector_sum_col_it = get_vector_sum_col_it(collapsed_window, vector_sum_col);
707 
708  // Offset in case vector_sum_col is batched
709  const int vector_sum_col_batch_offset = slide_vector_sum_col ? vector_sum_col->info()->strides_in_bytes().z() : 0;
710 
711  if(bias != nullptr)
712  {
713  Iterator bias_it = get_bias_it(collapsed_window, bias);
714  execute_window_loop(collapsed_window, [&](const Coordinates & id)
715  {
716  const int batch_id = id.z() / depth_input;
717  const auto vector_sum_col_ptr = reinterpret_cast<const int32_t *>(vector_sum_col_it.ptr() + batch_id * vector_sum_col_batch_offset);
718  run_offset_contribution_output_stage_window_symm(vector_sum_col_ptr, reinterpret_cast<const int32_t *>(bias_it.ptr()), mm_result_it, out_it,
719  result_multipliers, result_shifts,
720  result_offset_s32, min_s8, max_s8,
721  a_offset, offset, min_bound, max_bound,
722  window_step_x, window_start_x, window_end_x, true, true, is_bounded_relu, is_fixed_point);
723  },
724  vector_sum_col_it, bias_it, mm_result_it, out_it);
725  }
726  else
727  {
728  execute_window_loop(collapsed_window, [&](const Coordinates & id)
729  {
730  const int batch_id = id.z() / depth_input;
731  const auto vector_sum_col_ptr = reinterpret_cast<const int32_t *>(vector_sum_col_it.ptr() + batch_id * vector_sum_col_batch_offset);
732  run_offset_contribution_output_stage_window_symm(vector_sum_col_ptr, nullptr, mm_result_it, out_it,
733  result_multipliers, result_shifts,
734  result_offset_s32, min_s8, max_s8,
735  a_offset, offset, min_bound, max_bound,
736  window_step_x, window_start_x, window_end_x, true, false, is_bounded_relu, is_fixed_point);
737  },
738  vector_sum_col_it, mm_result_it, out_it);
739  }
740  }
741  else
742  {
743  if(bias != nullptr)
744  {
745  Iterator bias_it = get_bias_it(collapsed_window, bias);
746  execute_window_loop(collapsed_window, [&](const Coordinates &)
747  {
748  run_offset_contribution_output_stage_window_symm(nullptr, reinterpret_cast<const int32_t *>(bias_it.ptr()), mm_result_it, out_it,
749  result_multipliers, result_shifts,
750  result_offset_s32, min_s8, max_s8,
751  a_offset, offset, min_bound, max_bound,
752  window_step_x, window_start_x, window_end_x, false, true, is_bounded_relu, is_fixed_point);
753  },
754  bias_it, mm_result_it, out_it);
755  }
756  else
757  {
758  execute_window_loop(collapsed_window, [&](const Coordinates &)
759  {
760  run_offset_contribution_output_stage_window_symm(nullptr, nullptr, mm_result_it, out_it,
761  result_multipliers, result_shifts,
762  result_offset_s32, min_s8, max_s8,
763  a_offset, offset, min_bound, max_bound,
764  window_step_x, window_start_x, window_end_x, false, false, is_bounded_relu, is_fixed_point);
765  },
766  mm_result_it, out_it);
767  }
768  return;
769  }
770 }
771 
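// Validates data types, shapes and output stage parameters; used by configure() (through
// ARM_COMPUTE_ERROR_THROW_ON) and by the static NEGEMMLowpOffsetContributionOutputStageKernel::validate().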
772 Status validate_arguments(const ITensorInfo *mm_result, const ITensorInfo *vector_sum_col, const ITensorInfo *vector_sum_row, const ITensorInfo *bias, const ITensorInfo *output,
773  int32_t a_offset, int32_t b_offset, GEMMLowpOutputStageInfo output_stage)
774 {
776  if(output->data_type() != DataType::QASYMM8)
777  {
778  ARM_COMPUTE_RETURN_ERROR_ON(mm_result->dimension(0) > 1 && output_stage.gemmlowp_multipliers.size() > 1 && b_offset != 0);
779  }
780  ARM_COMPUTE_RETURN_ERROR_ON(output_stage.gemmlowp_min_bound > output_stage.gemmlowp_max_bound);
782 
783  if(bias != nullptr)
784  {
786  ARM_COMPUTE_RETURN_ERROR_ON(bias->num_dimensions() > 1);
787  ARM_COMPUTE_RETURN_ERROR_ON(mm_result->dimension(0) != bias->dimension(0));
788  }
789 
790  // If a_offset == 0, vector_sum_col can be a nullptr
791  if(a_offset != 0)
792  {
794  ARM_COMPUTE_RETURN_ERROR_ON(vector_sum_col->dimension(0) != mm_result->dimension(0));
795  }
796 
797  // If b_offset == 0, vector_sum_row can be a nullptr
798  if(b_offset != 0)
799  {
801 
802  // Check if input is a 3D reinterpretation
803  const bool reinterpret_as_3d = mm_result->num_dimensions() > 1 && mm_result->tensor_shape().y() != vector_sum_row->tensor_shape().x();
804 
805  // Validate input
806  ARM_COMPUTE_RETURN_ERROR_ON(reinterpret_as_3d && vector_sum_row->dimension(0) != (mm_result->dimension(1) * mm_result->dimension(2)));
807  ARM_COMPUTE_RETURN_ERROR_ON(!reinterpret_as_3d && vector_sum_row->dimension(0) != mm_result->dimension(1));
808 
809  TensorShape output_shape = output->tensor_shape();
810  if(output_shape.num_dimensions() > 1)
811  {
812  const unsigned int output_batch_idx = reinterpret_as_3d ? 3 : 2;
813 
814  TensorShape vector_sum_row_shape = vector_sum_row->tensor_shape();
815  vector_sum_row_shape.collapse_from(1);
816  output_shape.collapse_from(output_batch_idx);
817 
818  ARM_COMPUTE_RETURN_ERROR_ON_MSG(vector_sum_row_shape[1] != output_shape[output_batch_idx],
819  "mm_result tensor must have the same number of batches as the output tensor");
820 
821  if(a_offset != 0)
822  {
823  TensorShape vector_sum_col_shape = vector_sum_col->tensor_shape();
824  vector_sum_col_shape.collapse_from(1);
825 
826  ARM_COMPUTE_RETURN_ERROR_ON_MSG(vector_sum_col_shape[1] != 1 && vector_sum_col_shape[1] != vector_sum_row_shape[1],
827  "vector_sum_col tensor must have the same number of batches as vector_sum_row or the number of batches must be set to 1");
828  }
829  }
830  }
831 
832  if(output->total_size() != 0)
833  {
836  }
837 
838  return Status{};
839 }
840 
841 std::pair<Status, Window> validate_and_configure_window(ITensorInfo *mm_result, ITensorInfo *output)
842 {
843  // Auto-initialize the output if not yet initialized
844  auto_init_if_empty(*output, mm_result->clone()->set_data_type(DataType::QASYMM8));
845 
846  // Configure kernel window
847  Window win = calculate_max_window(*mm_result, Steps());
848 
849  // Note: This kernel processes 16 elements per iteration.
850  // However, since a left-over loop handles the remaining elements, no out-of-bounds reads or writes occur.
851  // For this reason num_elems_processed_per_iteration is 1 and update_window_and_padding() can be skipped.
852  Coordinates coord;
853  coord.set_num_dimensions(output->num_dimensions());
854  output->set_valid_region(ValidRegion(coord, output->tensor_shape()));
855 
856  return std::make_pair(Status{}, win);
857 }
858 } // namespace
859 
860 NEGEMMLowpOffsetContributionOutputStageKernel::NEGEMMLowpOffsetContributionOutputStageKernel()
861  : _vector_sum_col(nullptr), _vector_sum_row(nullptr), _bias(nullptr), _mm_result(nullptr), _output(nullptr), _a_offset(0), _b_offset(0), _k_offset(0), _slide_vector_sum_col(true),
862  _output_stage(GEMMLowpOutputStageInfo())
863 
864 {
865 }
866 
867 void NEGEMMLowpOffsetContributionOutputStageKernel::configure(const ITensor *mm_result, const ITensor *vector_sum_col,
868  const ITensor *vector_sum_row, const ITensor *bias, ITensor *output,
869  int32_t k, int32_t a_offset, int32_t b_offset,
870  GEMMLowpOutputStageInfo output_stage)
871 {
872  // Perform validate step
873  ARM_COMPUTE_ERROR_ON_NULLPTR(mm_result, output);
874 
875  ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(mm_result->info(), // NOLINT
876  vector_sum_col != nullptr ? vector_sum_col->info() : nullptr, // NOLINT
877  vector_sum_row != nullptr ? vector_sum_row->info() : nullptr, // NOLINT
878  bias != nullptr ? bias->info() : nullptr, // NOLINT
879  output->info(), a_offset, b_offset, output_stage)); // NOLINT
880 
881  _vector_sum_col = vector_sum_col;
882  _vector_sum_row = vector_sum_row;
883  _bias = bias;
884  _mm_result = mm_result;
885  _output = output;
886  _a_offset = a_offset;
887  _b_offset = b_offset;
888  _k_offset = a_offset * b_offset * k;
889  _output_stage = output_stage;
890 
891  // If a_offset == 0, vector_sum_col can be a nullptr
892  if(a_offset != 0)
893  {
894  // Check if vector_sum_col_shape should be slid or not
895  // Don't slide vector_sum_col_shape along the y dimension if vector_sum_col_shape has just 1 dimension and vector_sum_row_shape has more than 1
896  // This scenario can happen when the matrix multiplication is used to perform a convolution operation
897  _slide_vector_sum_col = vector_sum_col->info()->tensor_shape().num_dimensions() > 1;
898  }
899 
900  // Configure kernel window
901  auto win_config = validate_and_configure_window(mm_result->info(), output->info());
902  ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
903  INEKernel::configure(win_config.second);
904 }
905 
906 Status NEGEMMLowpOffsetContributionOutputStageKernel::validate(const ITensorInfo *mm_result, const ITensorInfo *vector_sum_col,
907  const ITensorInfo *vector_sum_row, const ITensorInfo *bias, const ITensorInfo *output,
908  int32_t a_offset, int32_t b_offset, GEMMLowpOutputStageInfo output_stage)
909 {
910  ARM_COMPUTE_ERROR_ON_NULLPTR(mm_result, output);
911  ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(mm_result, vector_sum_col, vector_sum_row, bias, output, a_offset, b_offset, output_stage));
912  ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(mm_result->clone().get(), output->clone().get()).first);
913  return Status{};
914 }
915 
916 void NEGEMMLowpOffsetContributionOutputStageKernel::run(const Window &window, const ThreadInfo &info)
917 {
918  ARM_COMPUTE_UNUSED(info);
919  ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
920  ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(INEKernel::window(), window);
921 
922  PixelValue type_min{};
923  PixelValue type_max{};
924  std::tie(type_min, type_max) = get_min_max(_output->info()->data_type());
925  int32_t type_min_int = type_min.get<int32_t>();
926  int32_t type_max_int = type_max.get<int32_t>();
927 
928  const bool reinterpret_as_3d = _vector_sum_row != nullptr
929  && _mm_result->info()->num_dimensions() > 1
930  && _mm_result->info()->tensor_shape().y() != _vector_sum_row->info()->tensor_shape().x();
931 
932  const bool is_bounded_relu = !(_output_stage.gemmlowp_min_bound <= type_min_int && _output_stage.gemmlowp_max_bound >= type_max_int);
933 
934  // Check if we need to perform fixed point requantization
935  const bool is_fixed_point = _output_stage.type != GEMMLowpOutputStageType::QUANTIZE_DOWN;
936 
937  // Check if the output is signed
938  const bool is_signed = _output->info()->data_type() == DataType::QASYMM8_SIGNED;
939 
940  // Check if symmetric per-channel execution
941  const bool is_symm = _output_stage.is_quantized_per_channel;
942 
943  if(is_symm)
944  {
945  run_offset_contribution_output_stage_symm(window, _mm_result, _vector_sum_col, _vector_sum_row, _bias, _output, _a_offset, _b_offset, _k_offset, _slide_vector_sum_col, _output_stage,
946  reinterpret_as_3d, is_bounded_relu, is_fixed_point);
947  }
948  else
949  {
950  if(is_signed)
951  {
952  run_offset_contribution_output_stage<int8_t>(window, _mm_result, _vector_sum_col, _vector_sum_row, _bias, _output, _a_offset, _b_offset, _k_offset, _slide_vector_sum_col, _output_stage,
953  reinterpret_as_3d, is_bounded_relu, is_fixed_point);
954  }
955  else
956  {
957  run_offset_contribution_output_stage<uint8_t>(window, _mm_result, _vector_sum_col, _vector_sum_row, _bias, _output, _a_offset, _b_offset, _k_offset, _slide_vector_sum_col, _output_stage,
958  reinterpret_as_3d, is_bounded_relu, is_fixed_point);
959  }
960  }
961 }
962 
963 } // namespace arm_compute
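The following is a minimal usage sketch rather than part of the library source: the tensor objects (mm_result, vector_sum_col, vector_sum_row, bias, output), the depth k and the requantization parameters (output_multiplier, output_shift, output_offset, a_offset, b_offset) are assumed to be provided by the caller with the types and shapes checked by validate_arguments() above.

// Minimal sketch, assuming the tensors and parameters above already exist.
GEMMLowpOutputStageInfo stage{};
stage.type                = GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT; // fixed-point requantization
stage.gemmlowp_multiplier = output_multiplier; // assumed: computed from input/weights/output scales
stage.gemmlowp_shift      = output_shift;      // assumed
stage.gemmlowp_offset     = output_offset;     // assumed: output quantization offset
stage.gemmlowp_min_bound  = 0;                 // bounded-ReLU lower bound
stage.gemmlowp_max_bound  = 255;               // bounded-ReLU upper bound

NEGEMMLowpOffsetContributionOutputStageKernel kernel;
// a_offset / b_offset follow the sign convention expected by the GEMMLowp pipeline;
// k is the depth of the matrix multiplication (number of accumulated terms).
ARM_COMPUTE_ERROR_THROW_ON(NEGEMMLowpOffsetContributionOutputStageKernel::validate(
    mm_result.info(), vector_sum_col.info(), vector_sum_row.info(), bias.info(), output.info(),
    a_offset, b_offset, stage));
kernel.configure(&mm_result, &vector_sum_col, &vector_sum_row, &bias, &output, k, a_offset, b_offset, stage);

// Run the kernel, e.g. through the NEON scheduler, splitting work along the Y dimension.
NEScheduler::get().schedule(&kernel, Window::DimY);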