Compute Library 21.02
NEGEMMLowpOffsetContributionOutputStageKernel.cpp
1 /*
2  * Copyright (c) 2019-2020 Arm Limited.
3  *
4  * SPDX-License-Identifier: MIT
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a copy
7  * of this software and associated documentation files (the "Software"), to
8  * deal in the Software without restriction, including without limitation the
9  * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10  * sell copies of the Software, and to permit persons to whom the Software is
11  * furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in all
14  * copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22  * SOFTWARE.
23  */
25 
26 #include "arm_compute/core/Error.h"
30 #include "arm_compute/core/Types.h"
31 #include "arm_compute/core/Utils.h"
35 #include "src/core/NEON/NEAsymm.h"
39 
40 #include <arm_neon.h>
41 #include <cstddef>
42 #include <cstdint>
43 #include <map>
44 
45 namespace arm_compute
46 {
47 class Coordinates;
48 
49 namespace
50 {
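// Loads 16 consecutive S32 GEMM accumulators starting at column x from the result iterator.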
51 inline int32x4x4_t load_results_input(const Iterator &mm_result_it, int32_t x)
52 {
53  return
54  {
55  {
56  vld1q_s32(reinterpret_cast<const int32_t *>(mm_result_it.ptr()) + x + 0),
57  vld1q_s32(reinterpret_cast<const int32_t *>(mm_result_it.ptr()) + x + 4),
58  vld1q_s32(reinterpret_cast<const int32_t *>(mm_result_it.ptr()) + x + 8),
59  vld1q_s32(reinterpret_cast<const int32_t *>(mm_result_it.ptr()) + x + 12)
60  }
61  };
62 }
63 
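// Same as load_results_input(), but reading from a raw int32_t pointer (bias, column sums, per-channel multipliers/shifts).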
64 inline int32x4x4_t load(const int32_t *ptr, int32_t x)
65 {
66  return
67  {
68  {
69  vld1q_s32(ptr + x + 0),
70  vld1q_s32(ptr + x + 4),
71  vld1q_s32(ptr + x + 8),
72  vld1q_s32(ptr + x + 12)
73  }
74  };
75 }
76 
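// Adds either a single int32x4_t or another 4x4 block to a 4x4 block of accumulators.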
77 inline int32x4x4_t add_s32(int32x4x4_t a, int32x4_t b)
78 {
79  return
80  {
81  {
82  vaddq_s32(a.val[0], b),
83  vaddq_s32(a.val[1], b),
84  vaddq_s32(a.val[2], b),
85  vaddq_s32(a.val[3], b)
86  }
87  };
88 }
89 
90 inline int32x4x4_t add_s32(int32x4x4_t a, int32x4x4_t b)
91 {
92  return
93  {
94  {
95  vaddq_s32(a.val[0], b.val[0]),
96  vaddq_s32(a.val[1], b.val[1]),
97  vaddq_s32(a.val[2], b.val[2]),
98  vaddq_s32(a.val[3], b.val[3])
99  }
100  };
101 }
102 
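// Multiplies a 4x4 block either by a single scalar or lane-wise by 16 multipliers loaded from memory.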
103 inline int32x4x4_t mul_s32(int32x4x4_t &a, int32_t mul_scalar)
104 {
105  return
106  {
107  {
108  vmulq_n_s32(a.val[0], mul_scalar),
109  vmulq_n_s32(a.val[1], mul_scalar),
110  vmulq_n_s32(a.val[2], mul_scalar),
111  vmulq_n_s32(a.val[3], mul_scalar)
112  }
113  };
114 }
115 
116 inline int32x4x4_t mul_s32(int32x4x4_t &a, const int32_t *multiplier)
117 {
118  return
119  {
120  {
121  vmulq_s32(a.val[0], vld1q_s32(multiplier)),
122  vmulq_s32(a.val[1], vld1q_s32(multiplier + 4)),
123  vmulq_s32(a.val[2], vld1q_s32(multiplier + 8)),
124  vmulq_s32(a.val[3], vld1q_s32(multiplier + 12))
125  }
126  };
127 }
128 
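// Computes the a_offset contribution: a_offset * vector_sum_col[x .. x+15].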
129 inline int32x4x4_t get_a_offset(const int32_t *vector_sum_col_ptr, int32_t a_offset, int32_t x)
130 {
131  int32x4x4_t a_offset_term_s32 = load(vector_sum_col_ptr, x);
132 
133  a_offset_term_s32.val[0] = vmulq_n_s32(a_offset_term_s32.val[0], a_offset);
134  a_offset_term_s32.val[1] = vmulq_n_s32(a_offset_term_s32.val[1], a_offset);
135  a_offset_term_s32.val[2] = vmulq_n_s32(a_offset_term_s32.val[2], a_offset);
136  a_offset_term_s32.val[3] = vmulq_n_s32(a_offset_term_s32.val[3], a_offset);
137  return a_offset_term_s32;
138 }
139 
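// Computes the b_offset contribution for the current output row: the row sum multiplied by b_offset, broadcast across a vector.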
140 inline int32x4_t get_b_offset(const int32_t *vector_sum_row_ptr, int32_t b_offset)
141 {
142  int32x4_t b_offset_term_s32 = vld1q_dup_s32(vector_sum_row_ptr);
143  b_offset_term_s32 = vmulq_n_s32(b_offset_term_s32, b_offset);
144  return b_offset_term_s32;
145 }
146 
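// Broadcasts the constant k_offset (a_offset * b_offset * k) across a 4x4 block of vectors.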
147 inline int32x4x4_t get_k_offset(int32_t k_offset)
148 {
149  return
150  {
151  {
152  vdupq_n_s32(k_offset),
153  vdupq_n_s32(k_offset),
154  vdupq_n_s32(k_offset),
155  vdupq_n_s32(k_offset)
156  }
157  };
158 }
159 
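// Requantization for the non-fixed-point (QUANTIZE_DOWN) output stage: the multiplier has already been
// applied by the caller, so this only shifts, clamps negative values to zero and saturates down to U8.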
160 inline uint8x16_t finalize_quantization_floating_point(int32x4x4_t &in_s32, int32x4_t result_shift_s32, uint8x16_t min_u8, uint8x16_t max_u8, bool is_bounded_relu)
161 {
162  const static int32x4_t zero_s32 = vdupq_n_s32(0);
163 
164  // Shift the final result (shifting by a negative amount shifts right)
165  in_s32.val[0] = vshlq_s32(in_s32.val[0], result_shift_s32);
166  in_s32.val[1] = vshlq_s32(in_s32.val[1], result_shift_s32);
167  in_s32.val[2] = vshlq_s32(in_s32.val[2], result_shift_s32);
168  in_s32.val[3] = vshlq_s32(in_s32.val[3], result_shift_s32);
169 
170  // Clamp negative values to zero
171  in_s32.val[0] = vmaxq_s32(in_s32.val[0], zero_s32);
172  in_s32.val[1] = vmaxq_s32(in_s32.val[1], zero_s32);
173  in_s32.val[2] = vmaxq_s32(in_s32.val[2], zero_s32);
174  in_s32.val[3] = vmaxq_s32(in_s32.val[3], zero_s32);
175 
176  // Convert S32 to S16
177  const int16x8x2_t in_s16 =
178  {
179  {
180  vcombine_s16(vqmovn_s32(in_s32.val[0]), vqmovn_s32(in_s32.val[1])),
181  vcombine_s16(vqmovn_s32(in_s32.val[2]), vqmovn_s32(in_s32.val[3]))
182  }
183  };
184 
185  // Convert S16 to U8
186  uint8x16_t out_u8 = vcombine_u8(vqmovun_s16(in_s16.val[0]), vqmovun_s16(in_s16.val[1]));
187 
188  if(is_bounded_relu)
189  {
190  out_u8 = vmaxq_u8(out_u8, min_u8);
191  out_u8 = vminq_u8(out_u8, max_u8);
192  }
193 
194  return out_u8;
195 }
196 
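// As above, but producing signed 8-bit (S8) output.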
197 inline int8x16_t finalize_quantization_floating_point(int32x4x4_t &in_s32, int32x4_t result_shift_s32, int8x16_t min_s8, int8x16_t max_s8, bool is_bounded_relu)
198 {
199  const static int32x4_t zero_s32 = vdupq_n_s32(0);
200 
201  // Shift the final result (shifting by a negative amount shifts right)
202  in_s32.val[0] = vshlq_s32(in_s32.val[0], result_shift_s32);
203  in_s32.val[1] = vshlq_s32(in_s32.val[1], result_shift_s32);
204  in_s32.val[2] = vshlq_s32(in_s32.val[2], result_shift_s32);
205  in_s32.val[3] = vshlq_s32(in_s32.val[3], result_shift_s32);
206 
207  // Clamp negative values to zero
208  in_s32.val[0] = vmaxq_s32(in_s32.val[0], zero_s32);
209  in_s32.val[1] = vmaxq_s32(in_s32.val[1], zero_s32);
210  in_s32.val[2] = vmaxq_s32(in_s32.val[2], zero_s32);
211  in_s32.val[3] = vmaxq_s32(in_s32.val[3], zero_s32);
212 
213  // Convert S32 to S16
214  const int16x8x2_t in_s16 =
215  {
216  {
217  vcombine_s16(vqmovn_s32(in_s32.val[0]), vqmovn_s32(in_s32.val[1])),
218  vcombine_s16(vqmovn_s32(in_s32.val[2]), vqmovn_s32(in_s32.val[3]))
219  }
220  };
221 
222  // Convert S16 to S8
223  int8x16_t out_s8 = vcombine_s8(vqmovn_s16(in_s16.val[0]), vqmovn_s16(in_s16.val[1]));
224 
225  if(is_bounded_relu)
226  {
227  out_s8 = vmaxq_s8(out_s8, min_s8);
228  out_s8 = vminq_s8(out_s8, max_s8);
229  }
230 
231  return out_s8;
232 }
233 
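// As above, but with per-channel shifts: each group of four lanes is shifted by its own (negated) amount.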
234 inline int8x16_t finalize_quantization_floating_point(int32x4x4_t &in_s32, int32x4x4_t result_shift_s32, int8x16_t min_s8, int8x16_t max_s8, bool is_bounded_relu)
235 {
236  const static int32x4_t zero_s32 = vdupq_n_s32(0);
237 
238  // Shift the final result (shifting by a negative amount shifts right)
239  in_s32.val[0] = vshlq_s32(in_s32.val[0], vnegq_s32(result_shift_s32.val[0]));
240  in_s32.val[1] = vshlq_s32(in_s32.val[1], vnegq_s32(result_shift_s32.val[1]));
241  in_s32.val[2] = vshlq_s32(in_s32.val[2], vnegq_s32(result_shift_s32.val[2]));
242  in_s32.val[3] = vshlq_s32(in_s32.val[3], vnegq_s32(result_shift_s32.val[3]));
243 
244  // Clamp negative values to zero
245  in_s32.val[0] = vmaxq_s32(in_s32.val[0], zero_s32);
246  in_s32.val[1] = vmaxq_s32(in_s32.val[1], zero_s32);
247  in_s32.val[2] = vmaxq_s32(in_s32.val[2], zero_s32);
248  in_s32.val[3] = vmaxq_s32(in_s32.val[3], zero_s32);
249 
250  // Convert S32 to S16
251  const int16x8x2_t in_s16 =
252  {
253  {
254  vcombine_s16(vqmovn_s32(in_s32.val[0]), vqmovn_s32(in_s32.val[1])),
255  vcombine_s16(vqmovn_s32(in_s32.val[2]), vqmovn_s32(in_s32.val[3]))
256  }
257  };
258 
259  // Convert S16 to S8
260  int8x16_t out_s8 = vcombine_s8(vqmovn_s16(in_s16.val[0]), vqmovn_s16(in_s16.val[1]));
261 
262  if(is_bounded_relu)
263  {
264  out_s8 = vmaxq_s8(out_s8, min_s8);
265  out_s8 = vminq_s8(out_s8, max_s8);
266  }
267 
268  return out_s8;
269 }
270 
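// Maps the output scalar type (uint8_t or int8_t) to its 128-bit Neon vector type.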
271 template <typename T>
272 struct VectorTyper
273 {
274  using stype = T;
275  using vtype = typename wrapper::traits::neon_bitvector_t<T, wrapper::traits::BitWidth::W128>;
276 };
277 
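// Helpers that build the windows and iterators used to traverse the vector sums and the bias.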
278 inline Window get_win_vector_sum(const Window &window)
279 {
280  Window win_vector_sum(window);
281  win_vector_sum.set(Window::DimY, Window::Dimension(0, 0, 0));
282  win_vector_sum.set(Window::DimZ, Window::Dimension(0, 0, 0));
283  return win_vector_sum;
284 }
285 
286 inline Iterator get_vector_sum_col_it(const Window &window, const ITensor *vector_sum_col)
287 {
288  Iterator vector_sum_col_it(vector_sum_col, get_win_vector_sum(window));
289  return vector_sum_col_it;
290 }
291 
292 inline Iterator get_vector_sum_row_it(const Window &window, const ITensor *vector_sum_row)
293 {
294  Window win_vector_sum_row = get_win_vector_sum(window);
295  win_vector_sum_row.set(Window::DimX, Window::Dimension(0, 0, 0));
296  Iterator vector_sum_row_it(vector_sum_row, win_vector_sum_row);
297  return vector_sum_row_it;
298 }
299 
300 inline Iterator get_bias_it(const Window &window, const ITensor *bias)
301 {
302  Window win_bias(window);
303  win_bias.set(Window::DimY, Window::Dimension(0, 1, 1));
304  win_bias.set(Window::DimZ, Window::Dimension(0, 1, 1));
305  Iterator bias_it(bias, win_bias);
306  return bias_it;
307 }
308 
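// Per-row routine: adds the requested offset contributions (a_offset * column sums, b_offset * row sum,
// k_offset) and the bias, then requantizes 16 elements per iteration with a scalar left-over loop, using
// either the fixed-point or the multiply-and-shift output stage.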
309 template <typename VT>
310 inline void run_offset_contribution_output_stage_window(const int32_t *vector_sum_col_ptr, const int32_t *vector_sum_row_ptr, const int32_t *bias_ptr, Iterator mm_result_it, Iterator out_it,
311  const int32x4_t result_offset_s32, const int32x4_t result_shift_s32,
312  typename VT::vtype min_vec, typename VT::vtype max_vec,
313  int32_t a_offset, int32_t b_offset, int32_t k_offset,
314  int32_t multiplier, int32_t shift, int32_t offset, int32_t min_bound, int32_t max_bound,
315  int window_step_x, int window_start_x, int window_end_x, bool has_a_offset, bool has_b_offset, bool has_bias, bool is_bounded_relu, bool is_fixed_point)
316 {
317  int32x4x4_t offset_term_s32 = { 0, 0, 0, 0 };
318  if(!is_fixed_point)
319  {
320  // Combine quantization offset with other offsets.
321  offset_term_s32 = add_s32(offset_term_s32, result_offset_s32);
322  }
323  if(has_a_offset && has_b_offset)
324  {
325  offset_term_s32 = add_s32(offset_term_s32, get_k_offset(k_offset));
326  }
327  if(has_b_offset)
328  {
329  offset_term_s32 = add_s32(offset_term_s32, get_b_offset(vector_sum_row_ptr, b_offset));
330  }
331 
332  int x = window_start_x;
333  for(; x <= (window_end_x - window_step_x); x += window_step_x)
334  {
335  int32x4x4_t in_s32 = load_results_input(mm_result_it, x);
336 
337  if(has_a_offset)
338  {
339  in_s32 = add_s32(in_s32, get_a_offset(vector_sum_col_ptr, a_offset, x));
340  }
341  if(has_bias)
342  {
343  in_s32 = add_s32(in_s32, load(bias_ptr, x));
344  }
345  if(!is_fixed_point || has_b_offset)
346  {
347  in_s32 = add_s32(in_s32, offset_term_s32);
348  }
349  if(!is_fixed_point)
350  {
351  in_s32 = mul_s32(in_s32, multiplier);
352  }
353 
354  if(is_fixed_point)
355  {
356  wrapper::vstore(reinterpret_cast<typename VT::stype *>(out_it.ptr() + x),
357  finalize_quantization(in_s32, multiplier, shift, result_offset_s32, min_vec, max_vec, is_bounded_relu));
358  }
359  else
360  {
361  wrapper::vstore(reinterpret_cast<typename VT::stype *>(out_it.ptr() + x),
362  finalize_quantization_floating_point(in_s32, result_shift_s32, min_vec, max_vec, is_bounded_relu));
363  }
364  }
365  // Compute left-over elements
366  for(; x < window_end_x; ++x)
367  {
368  int32_t in_value = *(reinterpret_cast<const int32_t *>(mm_result_it.ptr()) + x) + wrapper::vgetlane(offset_term_s32.val[0], 0);
369 
370  if(has_a_offset)
371  {
372  in_value += (*(vector_sum_col_ptr + x) * a_offset);
373  }
374  if(has_bias)
375  {
376  in_value += *(bias_ptr + x);
377  }
378 
379  if(is_fixed_point)
380  {
381  // Finalize and store the result
382  *reinterpret_cast<typename VT::stype *>(out_it.ptr() + x) = finalize_quantization(in_value, multiplier, shift, offset,
383  static_cast<typename VT::stype>(min_bound),
384  static_cast<typename VT::stype>(max_bound), is_bounded_relu);
385  }
386  else
387  {
388  // Finalize quantization
389  in_value = (in_value * multiplier) >> shift;
390 
391  // Bound and store the result
392  if(is_bounded_relu)
393  {
394  in_value = static_cast<typename VT::stype>(std::max<int32_t>(min_bound, std::min<int32_t>(max_bound, in_value)));
395  }
396  *reinterpret_cast<typename VT::stype *>(out_it.ptr() + x) = static_cast<typename VT::stype>(std::max<int32_t>(static_cast<int32_t>(std::numeric_limits<typename VT::stype>::lowest()),
397  std::min<int32_t>(static_cast<int32_t>(std::numeric_limits<typename VT::stype>::max()), in_value)));
398  }
399  }
400 }
401 
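// Per-row routine for symmetric per-channel quantization: applies the a_offset contribution and the bias,
// then requantizes with per-channel multipliers and shifts, producing signed 8-bit output.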
402 inline void run_offset_contribution_output_stage_window_symm(const int32_t *vector_sum_col_ptr, const int32_t *bias_ptr, Iterator mm_result_it, Iterator out_it,
403  const int32_t *result_multipliers, const int32_t *result_shifts,
404  const int32x4_t result_offset, int8x16_t min_s8, int8x16_t max_s8,
405  int32_t a_offset, int32_t offset, int32_t min_bound, int32_t max_bound,
406  int window_step_x, int window_start_x, int window_end_x, bool has_a_offset, bool has_bias, bool is_bounded_relu, bool is_fixed_point)
407 {
408  int32x4x4_t offset_term_s32 = { 0, 0, 0, 0 };
409  if(!is_fixed_point)
410  {
411  // Combine quantization offset with other offsets.
412  offset_term_s32 = add_s32(offset_term_s32, result_offset);
413  }
414 
415  int x = window_start_x;
416  for(; x <= (window_end_x - window_step_x); x += window_step_x)
417  {
418  int32x4x4_t in_s32 = load_results_input(mm_result_it, x);
419 
420  if(has_a_offset)
421  {
422  in_s32 = add_s32(in_s32, get_a_offset(vector_sum_col_ptr, a_offset, x));
423  }
424  if(has_bias)
425  {
426  in_s32 = add_s32(in_s32, load(bias_ptr, x));
427  }
428  if(!is_fixed_point)
429  {
430  in_s32 = add_s32(in_s32, offset_term_s32);
431  in_s32 = mul_s32(in_s32, result_multipliers + x);
432  }
433 
434  if(is_fixed_point)
435  {
436  vst1q_s8(reinterpret_cast<int8_t *>(out_it.ptr() + x), finalize_quantization_symm(in_s32, load(result_multipliers, x), load(result_shifts, x), result_offset, min_s8, max_s8, is_bounded_relu));
437  }
438  else
439  {
440  vst1q_s8(reinterpret_cast<int8_t *>(out_it.ptr() + x), finalize_quantization_floating_point(in_s32, load(result_shifts, x), min_s8, max_s8, is_bounded_relu));
441  }
442  }
443  // Compute left-over elements
444  for(; x < window_end_x; ++x)
445  {
446  int32_t in_value = *(reinterpret_cast<const int32_t *>(mm_result_it.ptr()) + x) + wrapper::vgetlane(offset_term_s32.val[0], 0);
447 
448  if(has_a_offset)
449  {
450  in_value += (*(vector_sum_col_ptr + x) * a_offset);
451  }
452  if(has_bias)
453  {
454  in_value += *(bias_ptr + x);
455  }
456 
457  if(is_fixed_point)
458  {
459  // Finalize and store the result
460  *(out_it.ptr() + x) = finalize_quantization(in_value, result_multipliers[x], result_shifts[x], offset, static_cast<int8_t>(min_bound), static_cast<int8_t>(max_bound), is_bounded_relu);
461  }
462  else
463  {
464  // Finalize quantization
465  in_value = (in_value * result_multipliers[x]) >> (-result_shifts[x]);
466 
467  // Bound and store the result
468  if(is_bounded_relu)
469  {
470  in_value = static_cast<int8_t>(std::max<int32_t>(min_bound, std::min<int32_t>(max_bound, in_value)));
471  }
472  *(out_it.ptr() + x) = static_cast<int8_t>(std::max<int32_t>(-128, std::min<int32_t>(127, in_value)));
473  }
474  }
475 }
476 
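// Sets up the execution window and iterators, handles batching and 3D reinterpretation, and dispatches to
// run_offset_contribution_output_stage_window() with flags describing which of a_offset, b_offset and the
// bias are present.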
477 template <typename T>
478 void run_offset_contribution_output_stage(const Window &window,
479  const ITensor *mm_result, const ITensor *vector_sum_col, const ITensor *vector_sum_row, const ITensor *bias, ITensor *output,
480  int32_t a_offset, int32_t b_offset, int32_t k_offset, bool slide_vector_sum_col,
481  GEMMLowpOutputStageInfo output_stage, bool is_gemm3d, bool is_bounded_relu, bool is_fixed_point)
482 {
483  using ExactTagType = typename wrapper::traits::neon_bitvector_tag_t<T, wrapper::traits::BitWidth::W128>;
484  using Typer = VectorTyper<T>;
485 
486  const int height_input = is_gemm3d ? mm_result->info()->dimension(1) : 0;
487  const int depth_input = is_gemm3d ? mm_result->info()->dimension(2) : 1;
488 
489  const int32_t multiplier = output_stage.gemmlowp_multiplier;
490  const int32_t shift = output_stage.gemmlowp_shift;
491  const int32_t offset = output_stage.gemmlowp_offset;
492  const int32_t min_bound = output_stage.gemmlowp_min_bound;
493  const int32_t max_bound = output_stage.gemmlowp_max_bound;
494 
495  const int32x4_t result_offset_s32 = vdupq_n_s32(offset);
496  const int32x4_t result_shift_s32 = vdupq_n_s32(is_fixed_point ? shift : -shift);
497  const auto min_vec = wrapper::vdup_n(static_cast<T>(min_bound), ExactTagType{});
498  const auto max_vec = wrapper::vdup_n(static_cast<T>(max_bound), ExactTagType{});
499 
500  const int window_step_x = 16;
501  const auto window_start_x = static_cast<int>(window.x().start());
502  const auto window_end_x = static_cast<int>(window.x().end());
503 
504  Window win(window);
505  win.set(Window::DimX, Window::Dimension(0, 1, 1));
506 
507  Window collapsed_window = win.collapse_if_possible(win, Window::DimZ);
508 
509  Iterator mm_result_it(mm_result, win);
510  Iterator out_it(output, win);
511 
512  if((a_offset != 0) && (b_offset != 0))
513  {
514  ARM_COMPUTE_ERROR_ON_NULLPTR(vector_sum_col);
515  ARM_COMPUTE_ERROR_ON_NULLPTR(vector_sum_row);
516 
517  Iterator vector_sum_col_it = get_vector_sum_col_it(collapsed_window, vector_sum_col);
518  Iterator vector_sum_row_it = get_vector_sum_row_it(collapsed_window, vector_sum_row);
519 
520  const size_t sum_row_stride_y = vector_sum_row->info()->strides_in_bytes().y();
521 
522  // Offset in case vector_sum_col is batched
523  const int vector_sum_col_batch_offset = slide_vector_sum_col ? vector_sum_col->info()->strides_in_bytes().z() : 0;
524 
525  if(bias != nullptr)
526  {
527  Iterator bias_it = get_bias_it(collapsed_window, bias);
528  execute_window_loop(collapsed_window, [&](const Coordinates & id)
529  {
530  const int batch_id = id.z() / depth_input;
531  const auto vector_sum_col_ptr = reinterpret_cast<const int32_t *>(vector_sum_col_it.ptr() + batch_id * vector_sum_col_batch_offset);
532  const auto vector_sum_row_ptr = reinterpret_cast<const int32_t *>(vector_sum_row_it.ptr() + batch_id * sum_row_stride_y)
533  + id.y() + (id.z() % depth_input) * height_input;
534  run_offset_contribution_output_stage_window<Typer>(vector_sum_col_ptr, vector_sum_row_ptr, reinterpret_cast<const int32_t *>(bias_it.ptr()),
535  mm_result_it,
536  out_it,
537  result_offset_s32, result_shift_s32,
538  min_vec, max_vec, a_offset, b_offset, k_offset,
539  multiplier, shift, offset, min_bound, max_bound,
540  window_step_x, window_start_x, window_end_x, true, true, true, is_bounded_relu, is_fixed_point);
541  },
542  vector_sum_col_it, vector_sum_row_it, bias_it, mm_result_it, out_it);
543  }
544  else
545  {
546  execute_window_loop(collapsed_window, [&](const Coordinates & id)
547  {
548  const int batch_id = id.z() / depth_input;
549  const auto vector_sum_col_ptr = reinterpret_cast<const int32_t *>(vector_sum_col_it.ptr() + batch_id * vector_sum_col_batch_offset);
550  const auto vector_sum_row_ptr = reinterpret_cast<const int32_t *>(vector_sum_row_it.ptr() + batch_id * sum_row_stride_y)
551  + id.y() + (id.z() % depth_input) * height_input;
552  run_offset_contribution_output_stage_window<Typer>(vector_sum_col_ptr, vector_sum_row_ptr, nullptr, mm_result_it, out_it,
553  result_offset_s32, result_shift_s32,
554  min_vec, max_vec, a_offset, b_offset, k_offset,
555  multiplier, shift, offset, min_bound, max_bound,
556  window_step_x, window_start_x, window_end_x, true, true, false, is_bounded_relu, is_fixed_point);
557  },
558  vector_sum_col_it, vector_sum_row_it, mm_result_it, out_it);
559  }
560  }
561  else if((a_offset == 0) && (b_offset != 0))
562  {
563  ARM_COMPUTE_ERROR_ON_NULLPTR(vector_sum_row);
564 
565  Iterator vector_sum_row_it = get_vector_sum_row_it(collapsed_window, vector_sum_row);
566 
567  const size_t sum_row_stride_y = vector_sum_row->info()->strides_in_bytes().y();
568 
569  if(bias != nullptr)
570  {
571  Iterator bias_it = get_bias_it(collapsed_window, bias);
572  execute_window_loop(collapsed_window, [&](const Coordinates & id)
573  {
574  const int batch_id = id.z() / depth_input;
575  const auto vector_sum_row_ptr = reinterpret_cast<const int32_t *>(vector_sum_row_it.ptr() + batch_id * sum_row_stride_y)
576  + id.y() + (id.z() % depth_input) * height_input;
577  run_offset_contribution_output_stage_window<Typer>(nullptr, vector_sum_row_ptr, reinterpret_cast<const int32_t *>(bias_it.ptr()), mm_result_it,
578  out_it,
579  result_offset_s32, result_shift_s32,
580  min_vec, max_vec, a_offset, b_offset, k_offset,
581  multiplier, shift, offset, min_bound, max_bound,
582  window_step_x, window_start_x, window_end_x, false, true, true, is_bounded_relu, is_fixed_point);
583  },
584  vector_sum_row_it, bias_it, mm_result_it, out_it);
585  }
586  else
587  {
588  execute_window_loop(collapsed_window, [&](const Coordinates & id)
589  {
590  const int batch_id = id.z() / depth_input;
591  const auto vector_sum_row_ptr = reinterpret_cast<const int32_t *>(vector_sum_row_it.ptr() + batch_id * sum_row_stride_y)
592  + id.y() + (id.z() % depth_input) * height_input;
593  run_offset_contribution_output_stage_window<Typer>(nullptr, vector_sum_row_ptr, nullptr, mm_result_it, out_it,
594  result_offset_s32, result_shift_s32,
595  min_vec, max_vec, a_offset, b_offset, k_offset,
596  multiplier, shift, offset, min_bound, max_bound,
597  window_step_x, window_start_x, window_end_x, false, true, false, is_bounded_relu, is_fixed_point);
598  },
599  vector_sum_row_it, mm_result_it, out_it);
600  }
601  }
602  else if((a_offset != 0) && (b_offset == 0))
603  {
604  ARM_COMPUTE_ERROR_ON_NULLPTR(vector_sum_col);
605 
606  Iterator vector_sum_col_it = get_vector_sum_col_it(collapsed_window, vector_sum_col);
607 
608  // Offset in case vector_sum_col is batched
609  const int vector_sum_col_batch_offset = slide_vector_sum_col ? vector_sum_col->info()->strides_in_bytes().z() : 0;
610 
611  if(bias != nullptr)
612  {
613  Iterator bias_it = get_bias_it(collapsed_window, bias);
614  execute_window_loop(collapsed_window, [&](const Coordinates & id)
615  {
616  const int batch_id = id.z() / depth_input;
617  const auto vector_sum_col_ptr = reinterpret_cast<const int32_t *>(vector_sum_col_it.ptr() + batch_id * vector_sum_col_batch_offset);
618  run_offset_contribution_output_stage_window<Typer>(vector_sum_col_ptr, nullptr, reinterpret_cast<const int32_t *>(bias_it.ptr()), mm_result_it,
619  out_it,
620  result_offset_s32, result_shift_s32,
621  min_vec, max_vec, a_offset, b_offset, k_offset,
622  multiplier, shift, offset, min_bound, max_bound,
623  window_step_x, window_start_x, window_end_x, true, false, true, is_bounded_relu, is_fixed_point);
624  },
625  vector_sum_col_it, bias_it, mm_result_it, out_it);
626  }
627  else
628  {
629  execute_window_loop(collapsed_window, [&](const Coordinates & id)
630  {
631  const int batch_id = id.z() / depth_input;
632  const auto vector_sum_col_ptr = reinterpret_cast<const int32_t *>(vector_sum_col_it.ptr() + batch_id * vector_sum_col_batch_offset);
633  run_offset_contribution_output_stage_window<Typer>(vector_sum_col_ptr, nullptr, nullptr, mm_result_it, out_it,
634  result_offset_s32, result_shift_s32,
635  min_vec, max_vec, a_offset, b_offset, k_offset,
636  multiplier, shift, offset, min_bound, max_bound,
637  window_step_x, window_start_x, window_end_x, true, false, false, is_bounded_relu, is_fixed_point);
638  },
639  vector_sum_col_it, mm_result_it, out_it);
640  }
641  }
642  else
643  {
644  if(bias != nullptr)
645  {
646  Iterator bias_it = get_bias_it(collapsed_window, bias);
647  execute_window_loop(collapsed_window, [&](const Coordinates &)
648  {
649  run_offset_contribution_output_stage_window<Typer>(nullptr, nullptr, reinterpret_cast<const int32_t *>(bias_it.ptr()), mm_result_it, out_it,
650  result_offset_s32, result_shift_s32,
651  min_vec, max_vec, a_offset, b_offset, k_offset,
652  multiplier, shift, offset, min_bound, max_bound,
653  window_step_x, window_start_x, window_end_x, false, false, true, is_bounded_relu, is_fixed_point);
654  },
655  bias_it, mm_result_it, out_it);
656  }
657  else
658  {
659  execute_window_loop(collapsed_window, [&](const Coordinates &)
660  {
661  run_offset_contribution_output_stage_window<Typer>(nullptr, nullptr, nullptr, mm_result_it, out_it,
662  result_offset_s32, result_shift_s32,
663  min_vec, max_vec, a_offset, b_offset, k_offset,
664  multiplier, shift, offset, min_bound, max_bound,
665  window_step_x, window_start_x, window_end_x, false, false, false, is_bounded_relu, is_fixed_point);
666  },
667  mm_result_it, out_it);
668  }
669  return;
670  }
671 }
672 
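// Symmetric per-channel counterpart of run_offset_contribution_output_stage(): only the a_offset
// contribution and the bias apply, and the per-channel multipliers/shifts come from the output stage info.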
673 void run_offset_contribution_output_stage_symm(const Window &window,
674  const ITensor *mm_result, const ITensor *vector_sum_col, const ITensor *vector_sum_row, const ITensor *bias, ITensor *output,
675  int32_t a_offset, int32_t b_offset, int32_t k_offset, bool slide_vector_sum_col,
676  GEMMLowpOutputStageInfo output_stage, bool is_gemm3d, bool is_bounded_relu, bool is_fixed_point)
677 {
678  ARM_COMPUTE_UNUSED(vector_sum_row, b_offset, k_offset);
679 
680  const int depth_input = is_gemm3d ? mm_result->info()->dimension(2) : 1;
681 
682  const int32_t offset = output_stage.gemmlowp_offset;
683  const int32_t min_bound = output_stage.gemmlowp_min_bound;
684  const int32_t max_bound = output_stage.gemmlowp_max_bound;
685 
686  const int32_t *result_multipliers = output_stage.gemmlowp_multipliers.data();
687  const int32_t *result_shifts = output_stage.gemmlowp_shifts.data();
688  const int32x4_t result_offset_s32 = vdupq_n_s32(offset);
689  const int8x16_t min_s8 = vdupq_n_s8(static_cast<int8_t>(min_bound));
690  const int8x16_t max_s8 = vdupq_n_s8(static_cast<int8_t>(max_bound));
691 
692  const int window_step_x = 16;
693  const auto window_start_x = static_cast<int>(window.x().start());
694  const auto window_end_x = static_cast<int>(window.x().end());
695 
696  Window win(window);
697  win.set(Window::DimX, Window::Dimension(0, 1, 1));
698 
699  Window collapsed_window = win.collapse_if_possible(win, Window::DimZ);
700 
701  Iterator mm_result_it(mm_result, win);
702  Iterator out_it(output, win);
703 
704  if(a_offset != 0)
705  {
706  ARM_COMPUTE_ERROR_ON_NULLPTR(vector_sum_col);
707 
708  Iterator vector_sum_col_it = get_vector_sum_col_it(collapsed_window, vector_sum_col);
709 
710  // Offset in case vector_sum_col is batched
711  const int vector_sum_col_batch_offset = slide_vector_sum_col ? vector_sum_col->info()->strides_in_bytes().z() : 0;
712 
713  if(bias != nullptr)
714  {
715  Iterator bias_it = get_bias_it(collapsed_window, bias);
716  execute_window_loop(collapsed_window, [&](const Coordinates & id)
717  {
718  const int batch_id = id.z() / depth_input;
719  const auto vector_sum_col_ptr = reinterpret_cast<const int32_t *>(vector_sum_col_it.ptr() + batch_id * vector_sum_col_batch_offset);
720  run_offset_contribution_output_stage_window_symm(vector_sum_col_ptr, reinterpret_cast<const int32_t *>(bias_it.ptr()), mm_result_it, out_it,
721  result_multipliers, result_shifts,
722  result_offset_s32, min_s8, max_s8,
723  a_offset, offset, min_bound, max_bound,
724  window_step_x, window_start_x, window_end_x, true, true, is_bounded_relu, is_fixed_point);
725  },
726  vector_sum_col_it, bias_it, mm_result_it, out_it);
727  }
728  else
729  {
730  execute_window_loop(collapsed_window, [&](const Coordinates & id)
731  {
732  const int batch_id = id.z() / depth_input;
733  const auto vector_sum_col_ptr = reinterpret_cast<const int32_t *>(vector_sum_col_it.ptr() + batch_id * vector_sum_col_batch_offset);
734  run_offset_contribution_output_stage_window_symm(vector_sum_col_ptr, nullptr, mm_result_it, out_it,
735  result_multipliers, result_shifts,
736  result_offset_s32, min_s8, max_s8,
737  a_offset, offset, min_bound, max_bound,
738  window_step_x, window_start_x, window_end_x, true, false, is_bounded_relu, is_fixed_point);
739  },
740  vector_sum_col_it, mm_result_it, out_it);
741  }
742  }
743  else
744  {
745  if(bias != nullptr)
746  {
747  Iterator bias_it = get_bias_it(collapsed_window, bias);
748  execute_window_loop(collapsed_window, [&](const Coordinates &)
749  {
750  run_offset_contribution_output_stage_window_symm(nullptr, reinterpret_cast<const int32_t *>(bias_it.ptr()), mm_result_it, out_it,
751  result_multipliers, result_shifts,
752  result_offset_s32, min_s8, max_s8,
753  a_offset, offset, min_bound, max_bound,
754  window_step_x, window_start_x, window_end_x, false, true, is_bounded_relu, is_fixed_point);
755  },
756  bias_it, mm_result_it, out_it);
757  }
758  else
759  {
760  execute_window_loop(collapsed_window, [&](const Coordinates &)
761  {
762  run_offset_contribution_output_stage_window_symm(nullptr, nullptr, mm_result_it, out_it,
763  result_multipliers, result_shifts,
764  result_offset_s32, min_s8, max_s8,
765  a_offset, offset, min_bound, max_bound,
766  window_step_x, window_start_x, window_end_x, false, false, is_bounded_relu, is_fixed_point);
767  },
768  mm_result_it, out_it);
769  }
770  return;
771  }
772 }
773 
774 Status validate_arguments(const ITensorInfo *mm_result, const ITensorInfo *vector_sum_col, const ITensorInfo *vector_sum_row, const ITensorInfo *bias, const ITensorInfo *output,
775  int32_t a_offset, int32_t b_offset, GEMMLowpOutputStageInfo output_stage)
776 {
778  if(output->data_type() != DataType::QASYMM8)
779  {
780  ARM_COMPUTE_RETURN_ERROR_ON(mm_result->dimension(0) > 1 && output_stage.gemmlowp_multipliers.size() > 1 && b_offset != 0);
781  }
782  ARM_COMPUTE_RETURN_ERROR_ON(output_stage.gemmlowp_min_bound > output_stage.gemmlowp_max_bound);
784 
785  if(bias != nullptr)
786  {
788  ARM_COMPUTE_RETURN_ERROR_ON(bias->num_dimensions() > 1);
789  ARM_COMPUTE_RETURN_ERROR_ON(mm_result->dimension(0) != bias->dimension(0));
790  }
791 
792  // If a_offset == 0, vector_sum_col can be a nullptr
793  if(a_offset != 0)
794  {
796  ARM_COMPUTE_RETURN_ERROR_ON(vector_sum_col->dimension(0) != mm_result->dimension(0));
797  }
798 
799  // If b_offset == 0, vector_sum_row can be a nullptr
800  if(b_offset != 0)
801  {
803 
804  // Check if input is a 3D reinterpretation
805  const bool reinterpret_as_3d = mm_result->num_dimensions() > 1 && mm_result->tensor_shape().y() != vector_sum_row->tensor_shape().x();
806 
807  // Validate input
808  ARM_COMPUTE_RETURN_ERROR_ON(reinterpret_as_3d && vector_sum_row->dimension(0) != (mm_result->dimension(1) * mm_result->dimension(2)));
809  ARM_COMPUTE_RETURN_ERROR_ON(!reinterpret_as_3d && vector_sum_row->dimension(0) != mm_result->dimension(1));
810 
811  TensorShape output_shape = output->tensor_shape();
812  if(output_shape.num_dimensions() > 1)
813  {
814  const unsigned int output_batch_idx = reinterpret_as_3d ? 3 : 2;
815 
816  TensorShape vector_sum_row_shape = vector_sum_row->tensor_shape();
817  vector_sum_row_shape.collapse_from(1);
818  output_shape.collapse_from(output_batch_idx);
819 
820  ARM_COMPUTE_RETURN_ERROR_ON_MSG(vector_sum_row_shape[1] != output_shape[output_batch_idx],
821  "mm_result tensor must have the same number of batches as the output tensor");
822 
823  if(a_offset != 0)
824  {
825  TensorShape vector_sum_col_shape = vector_sum_col->tensor_shape();
826  vector_sum_col_shape.collapse_from(1);
827 
828  ARM_COMPUTE_RETURN_ERROR_ON_MSG(vector_sum_col_shape[1] != 1 && vector_sum_col_shape[1] != vector_sum_row_shape[1],
829  "vector_sum_col tensor must have the same number of batches as vector_sum_row, or its number of batches must be set to 1");
830  }
831  }
832  }
833 
834  if(output->total_size() != 0)
835  {
838  }
839 
840  return Status{};
841 }
842 
843 std::pair<Status, Window> validate_and_configure_window(ITensorInfo *mm_result, ITensorInfo *output)
844 {
845  // Auto-initialize the output if not yet initialized
846  auto_init_if_empty(*output, mm_result->clone()->set_data_type(DataType::QASYMM8));
847 
848  // Configure kernel window
849  Window win = calculate_max_window(*mm_result, Steps());
850 
851  // Note: This kernel processes 16 elements per iteration.
852  // However, since a left-over loop handles the tail elements, no out-of-bounds reads or writes occur.
853  // For this reason num_elems_processed_per_iteration is 1 and update_window_and_padding() can be skipped.
854  Coordinates coord;
855  coord.set_num_dimensions(output->num_dimensions());
856  output->set_valid_region(ValidRegion(coord, output->tensor_shape()));
857 
858  return std::make_pair(Status{}, win);
859 }
860 } // namespace
861 
862 NEGEMMLowpOffsetContributionOutputStageKernel::NEGEMMLowpOffsetContributionOutputStageKernel()
863  : _vector_sum_col(nullptr), _vector_sum_row(nullptr), _bias(nullptr), _mm_result(nullptr), _output(nullptr), _a_offset(0), _b_offset(0), _k_offset(0), _slide_vector_sum_col(true),
864  _output_stage(GEMMLowpOutputStageInfo())
865 
866 {
867 }
868 
869 void NEGEMMLowpOffsetContributionOutputStageKernel::configure(const ITensor *mm_result, const ITensor *vector_sum_col,
870  const ITensor *vector_sum_row, const ITensor *bias, ITensor *output,
871  int32_t k, int32_t a_offset, int32_t b_offset,
872  GEMMLowpOutputStageInfo output_stage)
873 {
874  // Perform validate step
875  ARM_COMPUTE_ERROR_ON_NULLPTR(mm_result, output);
876 
877  ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(mm_result->info(),
878  vector_sum_col != nullptr ? vector_sum_col->info() : nullptr, // NOLINT
879  vector_sum_row != nullptr ? vector_sum_row->info() : nullptr, // NOLINT
880  bias != nullptr ? bias->info() : nullptr, // NOLINT
881  output->info(), a_offset, b_offset, output_stage)); // NOLINT
882 
883  _vector_sum_col = vector_sum_col;
884  _vector_sum_row = vector_sum_row;
885  _bias = bias;
886  _mm_result = mm_result;
887  _output = output;
888  _a_offset = a_offset;
889  _b_offset = b_offset;
890  _k_offset = a_offset * b_offset * k;
891  _output_stage = output_stage;
892 
893  // If a_offset == 0, vector_sum_col can be a nullptr
894  if(a_offset != 0)
895  {
896  // Check whether vector_sum_col should be slid or not
897  // Don't slide vector_sum_col along the y dimension if it has just 1 dimension while vector_sum_row has more than 1
898  // This scenario can happen when the matrix multiplication is used to perform a convolution operation
899  _slide_vector_sum_col = vector_sum_col->info()->tensor_shape().num_dimensions() > 1;
900  }
901 
902  // Configure kernel window
903  auto win_config = validate_and_configure_window(mm_result->info(), output->info());
904  ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
905  INEKernel::configure(win_config.second);
906 }
907 
908 Status NEGEMMLowpOffsetContributionOutputStageKernel::validate(const ITensorInfo *mm_result, const ITensorInfo *vector_sum_col,
909  const ITensorInfo *vector_sum_row, const ITensorInfo *bias, const ITensorInfo *output,
910  int32_t a_offset, int32_t b_offset, GEMMLowpOutputStageInfo output_stage)
911 {
912  ARM_COMPUTE_ERROR_ON_NULLPTR(mm_result, output);
913  ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(mm_result, vector_sum_col, vector_sum_row, bias, output, a_offset, b_offset, output_stage));
914  ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(mm_result->clone().get(), output->clone().get()).first);
915  return Status{};
916 }
917 
918 void NEGEMMLowpOffsetContributionOutputStageKernel::run(const Window &window, const ThreadInfo &info)
919 {
920  ARM_COMPUTE_UNUSED(info);
921  ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
922  ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(INEKernel::window(), window);
923 
924  PixelValue type_min{};
925  PixelValue type_max{};
926  std::tie(type_min, type_max) = get_min_max(_output->info()->data_type());
927  int32_t type_min_int = type_min.get<int32_t>();
928  int32_t type_max_int = type_max.get<int32_t>();
929 
930  const bool reinterpret_as_3d = _vector_sum_row != nullptr
931  && _mm_result->info()->num_dimensions() > 1
932  && _mm_result->info()->tensor_shape().y() != _vector_sum_row->info()->tensor_shape().x();
933 
934  const bool is_bounded_relu = !(_output_stage.gemmlowp_min_bound <= type_min_int && _output_stage.gemmlowp_max_bound >= type_max_int);
935 
936  // Check if we need to perform fixed point requantization
937  const bool is_fixed_point = _output_stage.type != GEMMLowpOutputStageType::QUANTIZE_DOWN;
938 
939  // Check if symmetric per-channel execution
940  const bool is_signed = _output->info()->data_type() == DataType::QASYMM8_SIGNED;
941 
942  // Check if symmetric per-channel execution
943  const bool is_symm = _output_stage.is_quantized_per_channel;
944 
945  if(is_symm)
946  {
947  run_offset_contribution_output_stage_symm(window, _mm_result, _vector_sum_col, _vector_sum_row, _bias, _output, _a_offset, _b_offset, _k_offset, _slide_vector_sum_col, _output_stage,
948  reinterpret_as_3d, is_bounded_relu, is_fixed_point);
949  }
950  else
951  {
952  if(is_signed)
953  {
954  run_offset_contribution_output_stage<int8_t>(window, _mm_result, _vector_sum_col, _vector_sum_row, _bias, _output, _a_offset, _b_offset, _k_offset, _slide_vector_sum_col, _output_stage,
955  reinterpret_as_3d, is_bounded_relu, is_fixed_point);
956  }
957  else
958  {
959  run_offset_contribution_output_stage<uint8_t>(window, _mm_result, _vector_sum_col, _vector_sum_row, _bias, _output, _a_offset, _b_offset, _k_offset, _slide_vector_sum_col, _output_stage,
960  reinterpret_as_3d, is_bounded_relu, is_fixed_point);
961  }
962  }
963 }
964 
965 } // namespace arm_compute