inline int32x4x4_t load_results_input(const Iterator &mm_result_it, int32_t x)
{
    return
    {
        {
            vld1q_s32(reinterpret_cast<const int32_t *>(mm_result_it.ptr()) + x + 0),
            vld1q_s32(reinterpret_cast<const int32_t *>(mm_result_it.ptr()) + x + 4),
            vld1q_s32(reinterpret_cast<const int32_t *>(mm_result_it.ptr()) + x + 8),
            vld1q_s32(reinterpret_cast<const int32_t *>(mm_result_it.ptr()) + x + 12)
        }
    };
}
inline int32x4x4_t load(const int32_t *ptr, int32_t x)
{
    return
    {
        {
            vld1q_s32(ptr + x + 0),
            vld1q_s32(ptr + x + 4),
            vld1q_s32(ptr + x + 8),
            vld1q_s32(ptr + x + 12)
        }
    };
}
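// Note: each loader above (and the arithmetic helpers below) moves
// 4 x int32x4 = 16 lanes per call, which matches the window_step_x of 16 used
// by the window functions further down; elements past the last full step are
// handled by the scalar leftover loops.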
inline int32x4x4_t add_s32(int32x4x4_t a, int32x4_t b)
{
    return
    {
        {
            vaddq_s32(a.val[0], b),
            vaddq_s32(a.val[1], b),
            vaddq_s32(a.val[2], b),
            vaddq_s32(a.val[3], b)
        }
    };
}
inline int32x4x4_t add_s32(int32x4x4_t a, int32x4x4_t b)
{
    return
    {
        {
            vaddq_s32(a.val[0], b.val[0]),
            vaddq_s32(a.val[1], b.val[1]),
            vaddq_s32(a.val[2], b.val[2]),
            vaddq_s32(a.val[3], b.val[3])
        }
    };
}
inline int32x4x4_t mul_s32(int32x4x4_t &a, int32_t mul_scalar)
{
    return
    {
        {
            vmulq_n_s32(a.val[0], mul_scalar),
            vmulq_n_s32(a.val[1], mul_scalar),
            vmulq_n_s32(a.val[2], mul_scalar),
            vmulq_n_s32(a.val[3], mul_scalar)
        }
    };
}
inline int32x4x4_t mul_s32(int32x4x4_t &a, const int32_t *multiplier)
{
    return
    {
        {
            vmulq_s32(a.val[0], vld1q_s32(multiplier)),
            vmulq_s32(a.val[1], vld1q_s32(multiplier + 4)),
            vmulq_s32(a.val[2], vld1q_s32(multiplier + 8)),
            vmulq_s32(a.val[3], vld1q_s32(multiplier + 12))
        }
    };
}
inline int32x4x4_t get_a_offset(const int32_t *vector_sum_col_ptr, int32_t a_offset, int32_t x)
{
    int32x4x4_t a_offset_term_s32 = load(vector_sum_col_ptr, x);

    a_offset_term_s32.val[0] = vmulq_n_s32(a_offset_term_s32.val[0], a_offset);
    a_offset_term_s32.val[1] = vmulq_n_s32(a_offset_term_s32.val[1], a_offset);
    a_offset_term_s32.val[2] = vmulq_n_s32(a_offset_term_s32.val[2], a_offset);
    a_offset_term_s32.val[3] = vmulq_n_s32(a_offset_term_s32.val[3], a_offset);
    return a_offset_term_s32;
}
inline int32x4_t get_b_offset(const int32_t *vector_sum_row_ptr, int32_t b_offset)
{
    int32x4_t b_offset_term_s32 = vld1q_dup_s32(vector_sum_row_ptr);
    b_offset_term_s32           = vmulq_n_s32(b_offset_term_s32, b_offset);
    return b_offset_term_s32;
}
inline int32x4x4_t get_k_offset(int32_t k_offset)
{
    return
    {
        {
            vdupq_n_s32(k_offset),
            vdupq_n_s32(k_offset),
            vdupq_n_s32(k_offset),
            vdupq_n_s32(k_offset)
        }
    };
}
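// For reference, the correction terms built by get_a_offset, get_b_offset and
// get_k_offset come from expanding the de-offset product. A scalar sketch
// follows; the helper is illustrative only (not part of the kernel) and
// assumes the caller has already arranged the sign convention so that the
// three terms can simply be added, as this kernel does:
//
//   sum_k (A[i][k] - a_off) * (B[k][j] - b_off)
//     = sum_k A[i][k]*B[k][j] - a_off * colsum_B[j] - b_off * rowsum_A[i] + K * a_off * b_off
inline int32_t offset_corrected_acc_scalar(int32_t raw_acc, int32_t col_sum_b, int32_t row_sum_a,
                                           int32_t a_off, int32_t b_off, int32_t k)
{
    // Raw s32 accumulator plus the three cheap correction terms
    return raw_acc - a_off * col_sum_b - b_off * row_sum_a + a_off * b_off * k;
}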
inline uint8x16_t finalize_quantization_floating_point(int32x4x4_t &in_s32, int32x4_t result_shift_s32, uint8x16_t min_u8, uint8x16_t max_u8, bool is_bounded_relu)
{
    const static int32x4_t zero_s32 = vdupq_n_s32(0);

    // Shift final result (negative value shifts right)
    in_s32.val[0] = vshlq_s32(in_s32.val[0], result_shift_s32);
    in_s32.val[1] = vshlq_s32(in_s32.val[1], result_shift_s32);
    in_s32.val[2] = vshlq_s32(in_s32.val[2], result_shift_s32);
    in_s32.val[3] = vshlq_s32(in_s32.val[3], result_shift_s32);

    // Saturate negative values
    in_s32.val[0] = vmaxq_s32(in_s32.val[0], zero_s32);
    in_s32.val[1] = vmaxq_s32(in_s32.val[1], zero_s32);
    in_s32.val[2] = vmaxq_s32(in_s32.val[2], zero_s32);
    in_s32.val[3] = vmaxq_s32(in_s32.val[3], zero_s32);

    // Convert S32 to S16
    const int16x8x2_t in_s16 =
    {
        {
            vcombine_s16(vqmovn_s32(in_s32.val[0]), vqmovn_s32(in_s32.val[1])),
            vcombine_s16(vqmovn_s32(in_s32.val[2]), vqmovn_s32(in_s32.val[3]))
        }
    };

    // Convert S16 to U8
    uint8x16_t out_u8 = vcombine_u8(vqmovun_s16(in_s16.val[0]), vqmovun_s16(in_s16.val[1]));

    if(is_bounded_relu)
    {
        out_u8 = vmaxq_u8(out_u8, min_u8);
        out_u8 = vminq_u8(out_u8, max_u8);
    }

    return out_u8;
}
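// Scalar equivalent of the vector path above (an illustrative sketch, not ACL
// API; the function name is hypothetical). Despite its name, the "floating
// point" output stage is an integer multiply (already applied by mul_s32)
// followed by an arithmetic right shift and a saturating narrow to U8; the
// callers below stash -shift in result_shift_s32 so that vshlq_s32 shifts right.
inline uint8_t finalize_quantization_floating_point_scalar(int32_t in, int32_t shift, uint8_t min_u8, uint8_t max_u8, bool is_bounded_relu)
{
    in = in >> shift;                                               // vshlq_s32 with a negative shift
    in = std::max<int32_t>(in, 0);                                  // saturate negative values
    uint8_t out = static_cast<uint8_t>(std::min<int32_t>(in, 255)); // saturating narrow to U8
    if(is_bounded_relu)
    {
        out = std::max(out, min_u8);
        out = std::min(out, max_u8);
    }
    return out;
}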
inline int8x16_t finalize_quantization_floating_point(int32x4x4_t &in_s32, int32x4_t result_shift_s32, int8x16_t min_s8, int8x16_t max_s8, bool is_bounded_relu)
{
    const static int32x4_t zero_s32 = vdupq_n_s32(0);

    // Shift final result (negative value shifts right)
    in_s32.val[0] = vshlq_s32(in_s32.val[0], result_shift_s32);
    in_s32.val[1] = vshlq_s32(in_s32.val[1], result_shift_s32);
    in_s32.val[2] = vshlq_s32(in_s32.val[2], result_shift_s32);
    in_s32.val[3] = vshlq_s32(in_s32.val[3], result_shift_s32);

    // Saturate negative values
    in_s32.val[0] = vmaxq_s32(in_s32.val[0], zero_s32);
    in_s32.val[1] = vmaxq_s32(in_s32.val[1], zero_s32);
    in_s32.val[2] = vmaxq_s32(in_s32.val[2], zero_s32);
    in_s32.val[3] = vmaxq_s32(in_s32.val[3], zero_s32);

    // Convert S32 to S16
    const int16x8x2_t in_s16 =
    {
        {
            vcombine_s16(vqmovn_s32(in_s32.val[0]), vqmovn_s32(in_s32.val[1])),
            vcombine_s16(vqmovn_s32(in_s32.val[2]), vqmovn_s32(in_s32.val[3]))
        }
    };

    // Convert S16 to S8
    int8x16_t out_s8 = vcombine_s8(vqmovn_s16(in_s16.val[0]), vqmovn_s16(in_s16.val[1]));

    if(is_bounded_relu)
    {
        out_s8 = vmaxq_s8(out_s8, min_s8);
        out_s8 = vminq_s8(out_s8, max_s8);
    }

    return out_s8;
}
inline int8x16_t finalize_quantization_floating_point(int32x4x4_t &in_s32, int32x4x4_t result_shift_s32, int8x16_t min_s8, int8x16_t max_s8, bool is_bounded_relu)
{
    const static int32x4_t zero_s32 = vdupq_n_s32(0);

    // Shift final result (negative value shifts right), with a per-channel shift vector
    in_s32.val[0] = vshlq_s32(in_s32.val[0], vnegq_s32(result_shift_s32.val[0]));
    in_s32.val[1] = vshlq_s32(in_s32.val[1], vnegq_s32(result_shift_s32.val[1]));
    in_s32.val[2] = vshlq_s32(in_s32.val[2], vnegq_s32(result_shift_s32.val[2]));
    in_s32.val[3] = vshlq_s32(in_s32.val[3], vnegq_s32(result_shift_s32.val[3]));

    // Saturate negative values
    in_s32.val[0] = vmaxq_s32(in_s32.val[0], zero_s32);
    in_s32.val[1] = vmaxq_s32(in_s32.val[1], zero_s32);
    in_s32.val[2] = vmaxq_s32(in_s32.val[2], zero_s32);
    in_s32.val[3] = vmaxq_s32(in_s32.val[3], zero_s32);

    // Convert S32 to S16
    const int16x8x2_t in_s16 =
    {
        {
            vcombine_s16(vqmovn_s32(in_s32.val[0]), vqmovn_s32(in_s32.val[1])),
            vcombine_s16(vqmovn_s32(in_s32.val[2]), vqmovn_s32(in_s32.val[3]))
        }
    };

    // Convert S16 to S8
    int8x16_t out_s8 = vcombine_s8(vqmovn_s16(in_s16.val[0]), vqmovn_s16(in_s16.val[1]));

    if(is_bounded_relu)
    {
        out_s8 = vmaxq_s8(out_s8, min_s8);
        out_s8 = vminq_s8(out_s8, max_s8);
    }

    return out_s8;
}
template <typename T>
struct VectorTyper
{
    using stype = T;
    using vtype = typename wrapper::traits::neon_bitvector_t<T, wrapper::traits::BitWidth::W128>;
};
inline Window get_win_vector_sum(const Window &window)
{
    Window win_vector_sum(window);
    win_vector_sum.set(Window::DimY, Window::Dimension(0, 0, 0));
    win_vector_sum.set(Window::DimZ, Window::Dimension(0, 0, 0));
    return win_vector_sum;
}
inline Iterator get_vector_sum_col_it(const Window &window, const ITensor *vector_sum_col)
{
    Iterator vector_sum_col_it(vector_sum_col, get_win_vector_sum(window));
    return vector_sum_col_it;
}
inline Iterator get_vector_sum_row_it(const Window &window, const ITensor *vector_sum_row)
{
    Window win_vector_sum_row = get_win_vector_sum(window);
    win_vector_sum_row.set(Window::DimX, Window::Dimension(0, 0, 0));
    Iterator vector_sum_row_it(vector_sum_row, win_vector_sum_row);
    return vector_sum_row_it;
}
inline Iterator get_bias_it(const Window &window, const ITensor *bias)
{
    Window win_bias(window);
    win_bias.set(Window::DimY, Window::Dimension(0, 1, 1));
    win_bias.set(Window::DimZ, Window::Dimension(0, 1, 1));
    Iterator bias_it(bias, win_bias);
    return bias_it;
}
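// The window helpers above deliberately pin Y and Z (and, for the row sums,
// also X) so that the auxiliary iterators do not advance with the main output
// window; broadcasting across rows and batches is instead done by explicit
// pointer arithmetic inside the execute_window_loop lambdas below.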
template <typename VT>
inline void run_offset_contribution_output_stage_window(const int32_t *vector_sum_col_ptr, const int32_t *vector_sum_row_ptr, const int32_t *bias_ptr, Iterator mm_result_it, Iterator out_it,
                                                        const int32x4_t result_offset_s32, const int32x4_t result_shift_s32,
                                                        typename VT::vtype min_vec, typename VT::vtype max_vec,
                                                        int32_t a_offset, int32_t b_offset, int32_t k_offset,
                                                        int32_t multiplier, int32_t shift, int32_t offset, int32_t min_bound, int32_t max_bound,
                                                        int window_step_x, int window_start_x, int window_end_x,
                                                        bool has_a_offset, bool has_b_offset, bool has_bias, bool is_bounded_relu, bool is_fixed_point)
{
    int32x4x4_t offset_term_s32 = { 0, 0, 0, 0 };
    if(!is_fixed_point)
    {
        // Combine quantization offset with other offsets.
        offset_term_s32 = add_s32(offset_term_s32, result_offset_s32);
    }
    if(has_a_offset && has_b_offset)
    {
        offset_term_s32 = add_s32(offset_term_s32, get_k_offset(k_offset));
    }
    if(has_b_offset)
    {
        offset_term_s32 = add_s32(offset_term_s32, get_b_offset(vector_sum_row_ptr, b_offset));
    }

    int x = window_start_x;
    for(; x <= (window_end_x - window_step_x); x += window_step_x)
    {
        int32x4x4_t in_s32 = load_results_input(mm_result_it, x);

        if(has_a_offset)
        {
            in_s32 = add_s32(in_s32, get_a_offset(vector_sum_col_ptr, a_offset, x));
        }
        if(has_bias)
        {
            in_s32 = add_s32(in_s32, load(bias_ptr, x));
        }
        if(!is_fixed_point || has_b_offset)
        {
            in_s32 = add_s32(in_s32, offset_term_s32);
        }
        if(!is_fixed_point)
        {
            in_s32 = mul_s32(in_s32, multiplier);
        }

        if(is_fixed_point)
        {
            wrapper::vstore(reinterpret_cast<typename VT::stype *>(out_it.ptr() + x),
                            finalize_quantization(in_s32, multiplier, shift, result_offset_s32, min_vec, max_vec, is_bounded_relu));
        }
        else
        {
            wrapper::vstore(reinterpret_cast<typename VT::stype *>(out_it.ptr() + x),
                            finalize_quantization_floating_point(in_s32, result_shift_s32, min_vec, max_vec, is_bounded_relu));
        }
    }

    // Compute left-over elements
    for(; x < window_end_x; ++x)
    {
        int32_t in_value = *(reinterpret_cast<const int32_t *>(mm_result_it.ptr()) + x) + wrapper::vgetlane(offset_term_s32.val[0], 0);

        if(has_a_offset)
        {
            in_value += (*(vector_sum_col_ptr + x) * a_offset);
        }
        if(has_bias)
        {
            in_value += *(bias_ptr + x);
        }

        if(is_fixed_point)
        {
            // Finalize and store the result
            *reinterpret_cast<typename VT::stype *>(out_it.ptr() + x) = finalize_quantization(in_value, multiplier, shift, offset,
                                                                                              static_cast<typename VT::stype>(min_bound),
                                                                                              static_cast<typename VT::stype>(max_bound), is_bounded_relu);
        }
        else
        {
            // Finalize quantization (from 32-bit to 8-bit)
            in_value = (in_value * multiplier) >> shift;

            // Bound and store the result
            if(is_bounded_relu)
            {
                in_value = static_cast<typename VT::stype>(std::max<int32_t>(min_bound, std::min<int32_t>(max_bound, in_value)));
            }
            *reinterpret_cast<typename VT::stype *>(out_it.ptr() + x) = static_cast<typename VT::stype>(std::max<int32_t>(static_cast<int32_t>(std::numeric_limits<typename VT::stype>::lowest()),
                                                                                                                          std::min<int32_t>(static_cast<int32_t>(std::numeric_limits<typename VT::stype>::max()), in_value)));
        }
    }
}
inline void run_offset_contribution_output_stage_window_symm(const int32_t *vector_sum_col_ptr, const int32_t *bias_ptr, Iterator mm_result_it, Iterator out_it,
                                                             const int32_t *result_multipliers, const int32_t *result_shifts,
                                                             const int32x4_t result_offset, int8x16_t min_s8, int8x16_t max_s8,
                                                             int32_t a_offset, int32_t offset, int32_t min_bound, int32_t max_bound,
                                                             int window_step_x, int window_start_x, int window_end_x,
                                                             bool has_a_offset, bool has_bias, bool is_bounded_relu, bool is_fixed_point)
{
    int32x4x4_t offset_term_s32 = { 0, 0, 0, 0 };
    if(!is_fixed_point)
    {
        // Combine quantization offset with other offsets.
        offset_term_s32 = add_s32(offset_term_s32, result_offset);
    }

    int x = window_start_x;
    for(; x <= (window_end_x - window_step_x); x += window_step_x)
    {
        int32x4x4_t in_s32 = load_results_input(mm_result_it, x);

        if(has_a_offset)
        {
            in_s32 = add_s32(in_s32, get_a_offset(vector_sum_col_ptr, a_offset, x));
        }
        if(has_bias)
        {
            in_s32 = add_s32(in_s32, load(bias_ptr, x));
        }
        if(!is_fixed_point)
        {
            in_s32 = add_s32(in_s32, offset_term_s32);
            in_s32 = mul_s32(in_s32, result_multipliers + x);
        }

        if(is_fixed_point)
        {
            vst1q_s8(reinterpret_cast<int8_t *>(out_it.ptr() + x),
                     finalize_quantization_symm(in_s32, load(result_multipliers, x), load(result_shifts, x), result_offset, min_s8, max_s8, is_bounded_relu));
        }
        else
        {
            vst1q_s8(reinterpret_cast<int8_t *>(out_it.ptr() + x),
                     finalize_quantization_floating_point(in_s32, load(result_shifts, x), min_s8, max_s8, is_bounded_relu));
        }
    }

    // Compute left-over elements
    for(; x < window_end_x; ++x)
    {
        int32_t in_value = *(reinterpret_cast<const int32_t *>(mm_result_it.ptr()) + x) + wrapper::vgetlane(offset_term_s32.val[0], 0);

        if(has_a_offset)
        {
            in_value += (*(vector_sum_col_ptr + x) * a_offset);
        }
        if(has_bias)
        {
            in_value += *(bias_ptr + x);
        }

        if(is_fixed_point)
        {
            // Finalize and store the result
            *(out_it.ptr() + x) = finalize_quantization(in_value, result_multipliers[x], result_shifts[x], offset,
                                                        static_cast<int8_t>(min_bound), static_cast<int8_t>(max_bound), is_bounded_relu);
        }
        else
        {
            // Finalize quantization (from 32-bit to 8-bit)
            in_value = (in_value * result_multipliers[x]) >> (-result_shifts[x]);

            // Bound and store the result
            if(is_bounded_relu)
            {
                in_value = static_cast<int8_t>(std::max<int32_t>(min_bound, std::min<int32_t>(max_bound, in_value)));
            }
            *(out_it.ptr() + x) = static_cast<int8_t>(std::max<int32_t>(-128, std::min<int32_t>(127, in_value)));
        }
    }
}
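// Scalar sketch of the per-channel requantization in the leftover loop above
// (illustrative only; the helper name is hypothetical). Each output channel x
// carries its own multiplier and shift, with the shift stored negated so that
// -result_shifts[x] is the right-shift amount:
inline int8_t requantize_per_channel_scalar(int32_t acc, int32_t multiplier, int32_t neg_shift)
{
    const int32_t v = (acc * multiplier) >> (-neg_shift); // mirrors (in_value * result_multipliers[x]) >> (-result_shifts[x])
    return static_cast<int8_t>(std::max<int32_t>(-128, std::min<int32_t>(127, v)));
}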
template <typename T>
void run_offset_contribution_output_stage(const Window &window,
                                          const ITensor *mm_result, const ITensor *vector_sum_col, const ITensor *vector_sum_row, const ITensor *bias, ITensor *output,
                                          int32_t a_offset, int32_t b_offset, int32_t k_offset, bool slide_vector_sum_col,
                                          GEMMLowpOutputStageInfo output_stage, bool is_gemm3d, bool is_bounded_relu, bool is_fixed_point)
{
    using ExactTagType = typename wrapper::traits::neon_bitvector_tag_t<T, wrapper::traits::BitWidth::W128>;
    using Typer        = VectorTyper<T>;

    const int height_input = is_gemm3d ? mm_result->info()->dimension(1) : 0;
    const int depth_input  = is_gemm3d ? mm_result->info()->dimension(2) : 1;

    const int32_t multiplier = output_stage.gemmlowp_multiplier;
    const int32_t shift      = output_stage.gemmlowp_shift;
    const int32_t offset     = output_stage.gemmlowp_offset;
    const int32_t min_bound  = output_stage.gemmlowp_min_bound;
    const int32_t max_bound  = output_stage.gemmlowp_max_bound;

    const int32x4_t result_offset_s32 = vdupq_n_s32(offset);
    const int32x4_t result_shift_s32  = vdupq_n_s32(is_fixed_point ? shift : -shift);
    const auto      min_vec           = wrapper::vdup_n(static_cast<T>(min_bound), ExactTagType{});
    const auto      max_vec           = wrapper::vdup_n(static_cast<T>(max_bound), ExactTagType{});

    const int  window_step_x  = 16;
    const auto window_start_x = static_cast<int>(window.x().start());
    const auto window_end_x   = static_cast<int>(window.x().end());

    Window win(window);
    win.set(Window::DimX, Window::Dimension(0, 1, 1));

    Window collapsed_window = win.collapse_if_possible(win, Window::DimZ);

    Iterator mm_result_it(mm_result, win);
    Iterator out_it(output, win);

    if((a_offset != 0) && (b_offset != 0))
    {
        ARM_COMPUTE_ERROR_ON_NULLPTR(vector_sum_col);
        ARM_COMPUTE_ERROR_ON_NULLPTR(vector_sum_row);

        Iterator vector_sum_col_it = get_vector_sum_col_it(collapsed_window, vector_sum_col);
        Iterator vector_sum_row_it = get_vector_sum_row_it(collapsed_window, vector_sum_row);

        const size_t sum_row_stride_y = vector_sum_row->info()->strides_in_bytes().y();

        // Offset in case vector_sum_col is batched
        const int vector_sum_col_batch_offset = slide_vector_sum_col ? vector_sum_col->info()->strides_in_bytes().z() : 0;

        if(bias != nullptr)
        {
            Iterator bias_it = get_bias_it(collapsed_window, bias);
            execute_window_loop(collapsed_window, [&](const Coordinates & id)
            {
                const int  batch_id           = id.z() / depth_input;
                const auto vector_sum_col_ptr = reinterpret_cast<const int32_t *>(vector_sum_col_it.ptr() + batch_id * vector_sum_col_batch_offset);
                const auto vector_sum_row_ptr = reinterpret_cast<const int32_t *>(vector_sum_row_it.ptr() + batch_id * sum_row_stride_y)
                                                + id.y() + (id.z() % depth_input) * height_input;
                run_offset_contribution_output_stage_window<Typer>(vector_sum_col_ptr, vector_sum_row_ptr, reinterpret_cast<const int32_t *>(bias_it.ptr()),
                                                                   mm_result_it, out_it,
                                                                   result_offset_s32, result_shift_s32,
                                                                   min_vec, max_vec, a_offset, b_offset, k_offset,
                                                                   multiplier, shift, offset, min_bound, max_bound,
                                                                   window_step_x, window_start_x, window_end_x,
                                                                   true, true, true, is_bounded_relu, is_fixed_point);
            },
            vector_sum_col_it, vector_sum_row_it, bias_it, mm_result_it, out_it);
        }
        else
        {
            execute_window_loop(collapsed_window, [&](const Coordinates & id)
            {
                const int  batch_id           = id.z() / depth_input;
                const auto vector_sum_col_ptr = reinterpret_cast<const int32_t *>(vector_sum_col_it.ptr() + batch_id * vector_sum_col_batch_offset);
                const auto vector_sum_row_ptr = reinterpret_cast<const int32_t *>(vector_sum_row_it.ptr() + batch_id * sum_row_stride_y)
                                                + id.y() + (id.z() % depth_input) * height_input;
                run_offset_contribution_output_stage_window<Typer>(vector_sum_col_ptr, vector_sum_row_ptr, nullptr, mm_result_it, out_it,
                                                                   result_offset_s32, result_shift_s32,
                                                                   min_vec, max_vec, a_offset, b_offset, k_offset,
                                                                   multiplier, shift, offset, min_bound, max_bound,
                                                                   window_step_x, window_start_x, window_end_x,
                                                                   true, true, false, is_bounded_relu, is_fixed_point);
            },
            vector_sum_col_it, vector_sum_row_it, mm_result_it, out_it);
        }
    }
    else if((a_offset == 0) && (b_offset != 0))
    {
        ARM_COMPUTE_ERROR_ON_NULLPTR(vector_sum_row);

        Iterator vector_sum_row_it = get_vector_sum_row_it(collapsed_window, vector_sum_row);

        const size_t sum_row_stride_y = vector_sum_row->info()->strides_in_bytes().y();

        if(bias != nullptr)
        {
            Iterator bias_it = get_bias_it(collapsed_window, bias);
            execute_window_loop(collapsed_window, [&](const Coordinates & id)
            {
                const int  batch_id           = id.z() / depth_input;
                const auto vector_sum_row_ptr = reinterpret_cast<const int32_t *>(vector_sum_row_it.ptr() + batch_id * sum_row_stride_y)
                                                + id.y() + (id.z() % depth_input) * height_input;
                run_offset_contribution_output_stage_window<Typer>(nullptr, vector_sum_row_ptr, reinterpret_cast<const int32_t *>(bias_it.ptr()),
                                                                   mm_result_it, out_it,
                                                                   result_offset_s32, result_shift_s32,
                                                                   min_vec, max_vec, a_offset, b_offset, k_offset,
                                                                   multiplier, shift, offset, min_bound, max_bound,
                                                                   window_step_x, window_start_x, window_end_x,
                                                                   false, true, true, is_bounded_relu, is_fixed_point);
            },
            vector_sum_row_it, bias_it, mm_result_it, out_it);
        }
        else
        {
            execute_window_loop(collapsed_window, [&](const Coordinates & id)
            {
                const int  batch_id           = id.z() / depth_input;
                const auto vector_sum_row_ptr = reinterpret_cast<const int32_t *>(vector_sum_row_it.ptr() + batch_id * sum_row_stride_y)
                                                + id.y() + (id.z() % depth_input) * height_input;
                run_offset_contribution_output_stage_window<Typer>(nullptr, vector_sum_row_ptr, nullptr, mm_result_it, out_it,
                                                                   result_offset_s32, result_shift_s32,
                                                                   min_vec, max_vec, a_offset, b_offset, k_offset,
                                                                   multiplier, shift, offset, min_bound, max_bound,
                                                                   window_step_x, window_start_x, window_end_x,
                                                                   false, true, false, is_bounded_relu, is_fixed_point);
            },
            vector_sum_row_it, mm_result_it, out_it);
        }
    }
    else if((a_offset != 0) && (b_offset == 0))
    {
        ARM_COMPUTE_ERROR_ON_NULLPTR(vector_sum_col);

        Iterator vector_sum_col_it = get_vector_sum_col_it(collapsed_window, vector_sum_col);

        // Offset in case vector_sum_col is batched
        const int vector_sum_col_batch_offset = slide_vector_sum_col ? vector_sum_col->info()->strides_in_bytes().z() : 0;

        if(bias != nullptr)
        {
            Iterator bias_it = get_bias_it(collapsed_window, bias);
            execute_window_loop(collapsed_window, [&](const Coordinates & id)
            {
                const int  batch_id           = id.z() / depth_input;
                const auto vector_sum_col_ptr = reinterpret_cast<const int32_t *>(vector_sum_col_it.ptr() + batch_id * vector_sum_col_batch_offset);
                run_offset_contribution_output_stage_window<Typer>(vector_sum_col_ptr, nullptr, reinterpret_cast<const int32_t *>(bias_it.ptr()),
                                                                   mm_result_it, out_it,
                                                                   result_offset_s32, result_shift_s32,
                                                                   min_vec, max_vec, a_offset, b_offset, k_offset,
                                                                   multiplier, shift, offset, min_bound, max_bound,
                                                                   window_step_x, window_start_x, window_end_x,
                                                                   true, false, true, is_bounded_relu, is_fixed_point);
            },
            vector_sum_col_it, bias_it, mm_result_it, out_it);
        }
        else
        {
            execute_window_loop(collapsed_window, [&](const Coordinates & id)
            {
                const int  batch_id           = id.z() / depth_input;
                const auto vector_sum_col_ptr = reinterpret_cast<const int32_t *>(vector_sum_col_it.ptr() + batch_id * vector_sum_col_batch_offset);
                run_offset_contribution_output_stage_window<Typer>(vector_sum_col_ptr, nullptr, nullptr, mm_result_it, out_it,
                                                                   result_offset_s32, result_shift_s32,
                                                                   min_vec, max_vec, a_offset, b_offset, k_offset,
                                                                   multiplier, shift, offset, min_bound, max_bound,
                                                                   window_step_x, window_start_x, window_end_x,
                                                                   true, false, false, is_bounded_relu, is_fixed_point);
            },
            vector_sum_col_it, mm_result_it, out_it);
        }
    }
    else
    {
        if(bias != nullptr)
        {
            Iterator bias_it = get_bias_it(collapsed_window, bias);
            execute_window_loop(collapsed_window, [&](const Coordinates &)
            {
                run_offset_contribution_output_stage_window<Typer>(nullptr, nullptr, reinterpret_cast<const int32_t *>(bias_it.ptr()), mm_result_it, out_it,
                                                                   result_offset_s32, result_shift_s32,
                                                                   min_vec, max_vec, a_offset, b_offset, k_offset,
                                                                   multiplier, shift, offset, min_bound, max_bound,
                                                                   window_step_x, window_start_x, window_end_x,
                                                                   false, false, true, is_bounded_relu, is_fixed_point);
            },
            bias_it, mm_result_it, out_it);
        }
        else
        {
            execute_window_loop(collapsed_window, [&](const Coordinates &)
            {
                run_offset_contribution_output_stage_window<Typer>(nullptr, nullptr, nullptr, mm_result_it, out_it,
                                                                   result_offset_s32, result_shift_s32,
                                                                   min_vec, max_vec, a_offset, b_offset, k_offset,
                                                                   multiplier, shift, offset, min_bound, max_bound,
                                                                   window_step_x, window_start_x, window_end_x,
                                                                   false, false, false, is_bounded_relu, is_fixed_point);
            },
            mm_result_it, out_it);
        }
    }
}
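// Dispatch summary for the four cases above; the first three booleans
// forwarded to run_offset_contribution_output_stage_window are
// (has_a_offset, has_b_offset, has_bias), with has_bias additionally
// requiring bias != nullptr:
//   a_offset != 0 && b_offset != 0  ->  (true,  true,  has_bias)
//   a_offset == 0 && b_offset != 0  ->  (false, true,  has_bias)
//   a_offset != 0 && b_offset == 0  ->  (true,  false, has_bias)
//   a_offset == 0 && b_offset == 0  ->  (false, false, has_bias)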
void run_offset_contribution_output_stage_symm(const Window &window,
                                               const ITensor *mm_result, const ITensor *vector_sum_col, const ITensor *vector_sum_row, const ITensor *bias, ITensor *output,
                                               int32_t a_offset, int32_t b_offset, int32_t k_offset, bool slide_vector_sum_col,
                                               GEMMLowpOutputStageInfo output_stage, bool is_gemm3d, bool is_bounded_relu, bool is_fixed_point)
{
    ARM_COMPUTE_UNUSED(vector_sum_row, b_offset, k_offset);

    const int depth_input = is_gemm3d ? mm_result->info()->dimension(2) : 1;

    const int32_t offset    = output_stage.gemmlowp_offset;
    const int32_t min_bound = output_stage.gemmlowp_min_bound;
    const int32_t max_bound = output_stage.gemmlowp_max_bound;

    const int32_t *result_multipliers = output_stage.gemmlowp_multipliers.data();
    const int32_t *result_shifts      = output_stage.gemmlowp_shifts.data();
    const int32x4_t result_offset_s32 = vdupq_n_s32(offset);
    const int8x16_t min_s8            = vdupq_n_s8(static_cast<int8_t>(min_bound));
    const int8x16_t max_s8            = vdupq_n_s8(static_cast<int8_t>(max_bound));

    const int  window_step_x  = 16;
    const auto window_start_x = static_cast<int>(window.x().start());
    const auto window_end_x   = static_cast<int>(window.x().end());

    Window win(window);
    win.set(Window::DimX, Window::Dimension(0, 1, 1));

    Window collapsed_window = win.collapse_if_possible(win, Window::DimZ);

    Iterator mm_result_it(mm_result, win);
    Iterator out_it(output, win);

    if(a_offset != 0)
    {
        ARM_COMPUTE_ERROR_ON_NULLPTR(vector_sum_col);

        Iterator vector_sum_col_it = get_vector_sum_col_it(collapsed_window, vector_sum_col);

        // Offset in case vector_sum_col is batched
        const int vector_sum_col_batch_offset = slide_vector_sum_col ? vector_sum_col->info()->strides_in_bytes().z() : 0;

        if(bias != nullptr)
        {
            Iterator bias_it = get_bias_it(collapsed_window, bias);
            execute_window_loop(collapsed_window, [&](const Coordinates & id)
            {
                const int  batch_id           = id.z() / depth_input;
                const auto vector_sum_col_ptr = reinterpret_cast<const int32_t *>(vector_sum_col_it.ptr() + batch_id * vector_sum_col_batch_offset);
                run_offset_contribution_output_stage_window_symm(vector_sum_col_ptr, reinterpret_cast<const int32_t *>(bias_it.ptr()), mm_result_it, out_it,
                                                                 result_multipliers, result_shifts,
                                                                 result_offset_s32, min_s8, max_s8,
                                                                 a_offset, offset, min_bound, max_bound,
                                                                 window_step_x, window_start_x, window_end_x,
                                                                 true, true, is_bounded_relu, is_fixed_point);
            },
            vector_sum_col_it, bias_it, mm_result_it, out_it);
        }
        else
        {
            execute_window_loop(collapsed_window, [&](const Coordinates & id)
            {
                const int  batch_id           = id.z() / depth_input;
                const auto vector_sum_col_ptr = reinterpret_cast<const int32_t *>(vector_sum_col_it.ptr() + batch_id * vector_sum_col_batch_offset);
                run_offset_contribution_output_stage_window_symm(vector_sum_col_ptr, nullptr, mm_result_it, out_it,
                                                                 result_multipliers, result_shifts,
                                                                 result_offset_s32, min_s8, max_s8,
                                                                 a_offset, offset, min_bound, max_bound,
                                                                 window_step_x, window_start_x, window_end_x,
                                                                 true, false, is_bounded_relu, is_fixed_point);
            },
            vector_sum_col_it, mm_result_it, out_it);
        }
    }
    else
    {
        if(bias != nullptr)
        {
            Iterator bias_it = get_bias_it(collapsed_window, bias);
            execute_window_loop(collapsed_window, [&](const Coordinates &)
            {
                run_offset_contribution_output_stage_window_symm(nullptr, reinterpret_cast<const int32_t *>(bias_it.ptr()), mm_result_it, out_it,
                                                                 result_multipliers, result_shifts,
                                                                 result_offset_s32, min_s8, max_s8,
                                                                 a_offset, offset, min_bound, max_bound,
                                                                 window_step_x, window_start_x, window_end_x,
                                                                 false, true, is_bounded_relu, is_fixed_point);
            },
            bias_it, mm_result_it, out_it);
        }
        else
        {
            execute_window_loop(collapsed_window, [&](const Coordinates &)
            {
                run_offset_contribution_output_stage_window_symm(nullptr, nullptr, mm_result_it, out_it,
                                                                 result_multipliers, result_shifts,
                                                                 result_offset_s32, min_s8, max_s8,
                                                                 a_offset, offset, min_bound, max_bound,
                                                                 window_step_x, window_start_x, window_end_x,
                                                                 false, false, is_bounded_relu, is_fixed_point);
            },
            mm_result_it, out_it);
        }
    }
}
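// Unlike the generic dispatcher above, the symmetric per-channel path only
// branches on a_offset and the presence of bias: with symmetric weight
// quantization the weight offset is zero, so there is no b_offset or k_offset
// contribution to fold in (hence the ARM_COMPUTE_UNUSED at the top).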
Status validate_arguments(const ITensorInfo *mm_result, const ITensorInfo *vector_sum_col, const ITensorInfo *vector_sum_row, const ITensorInfo *bias, const ITensorInfo *output,
                          int32_t a_offset, int32_t b_offset, GEMMLowpOutputStageInfo output_stage)
{
    // ... (data type and shape checks on mm_result, vector_sum_col, bias and output_stage elided in this listing) ...

    // If b_offset == 0, vector_sum_row can be a nullptr
    if(b_offset != 0)
    {
        // Check if mm_result needs to be interpreted as 3D
        const bool reinterpret_as_3d = mm_result->num_dimensions() > 1 && mm_result->tensor_shape().y() != vector_sum_row->tensor_shape().x();

        ARM_COMPUTE_RETURN_ERROR_ON(reinterpret_as_3d && vector_sum_row->dimension(0) != (mm_result->dimension(1) * mm_result->dimension(2)));

        TensorShape output_shape = output->tensor_shape();
        if(output_shape.num_dimensions() > 1)
        {
            const unsigned int output_batch_idx = reinterpret_as_3d ? 3 : 2;

            TensorShape vector_sum_row_shape = vector_sum_row->tensor_shape();
            vector_sum_row_shape.collapse_from(1);
            output_shape.collapse_from(output_batch_idx);

            ARM_COMPUTE_RETURN_ERROR_ON_MSG(vector_sum_row_shape[1] != output_shape[output_batch_idx],
                                            "mm_result tensor must have the same number of batches of output tensor");

            if(a_offset != 0)
            {
                TensorShape vector_sum_col_shape = vector_sum_col->tensor_shape();
                vector_sum_col_shape.collapse_from(1);

                ARM_COMPUTE_RETURN_ERROR_ON_MSG(vector_sum_col_shape[1] != 1 && vector_sum_col_shape[1] != vector_sum_row_shape[1],
                                                "vector_sum_col tensor must have the same number of batches of vector_sum_row_shape or the number of batches must be set to 1");
            }
        }
    }

    if(output->total_size() != 0)
    {
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(mm_result, output);
    }

    return Status{};
}

std::pair<Status, Window> validate_and_configure_window(ITensorInfo *mm_result, ITensorInfo *output)
{
    // Configure kernel window
    Window win = calculate_max_window(*mm_result, Steps());

    // This kernel processes 16 elements per iteration with a scalar leftover
    // loop, so no out-of-bounds access can occur and no padding is required.
    Coordinates coord;
    coord.set_num_dimensions(output->num_dimensions());
    output->set_valid_region(ValidRegion(coord, output->tensor_shape()));

    return std::make_pair(Status{}, win);
}
NEGEMMLowpOffsetContributionOutputStageKernel::NEGEMMLowpOffsetContributionOutputStageKernel()
    : _vector_sum_col(nullptr), _vector_sum_row(nullptr), _bias(nullptr), _mm_result(nullptr), _output(nullptr), _a_offset(0), _b_offset(0), _k_offset(0), _slide_vector_sum_col(true),
      _output_stage(GEMMLowpOutputStageInfo())
{
}
void NEGEMMLowpOffsetContributionOutputStageKernel::configure(const ITensor *mm_result, const ITensor *vector_sum_col, const ITensor *vector_sum_row, const ITensor *bias, ITensor *output,
                                                              int32_t k, int32_t a_offset, int32_t b_offset, GEMMLowpOutputStageInfo output_stage)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(mm_result, output);

    // Perform validate step
    ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(mm_result->info(),
                                                  vector_sum_col != nullptr ? vector_sum_col->info() : nullptr,
                                                  vector_sum_row != nullptr ? vector_sum_row->info() : nullptr,
                                                  bias != nullptr ? bias->info() : nullptr,
                                                  output->info(), a_offset, b_offset, output_stage));

    _vector_sum_col = vector_sum_col;
    _vector_sum_row = vector_sum_row;
    _bias           = bias;
    _mm_result      = mm_result;
    _output         = output;
    _a_offset       = a_offset;
    _b_offset       = b_offset;
    _k_offset       = a_offset * b_offset * k;
    _output_stage   = output_stage;

    // If a_offset == 0, vector_sum_col can be a nullptr
    if(a_offset != 0)
    {
        // Don't slide vector_sum_col along Y if it has a single dimension; this
        // happens when the matrix multiplication is used to perform a convolution
        _slide_vector_sum_col = vector_sum_col->info()->tensor_shape().num_dimensions() > 1;
    }

    // Configure kernel window
    auto win_config = validate_and_configure_window(mm_result->info(), output->info());
    ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
    INEKernel::configure(win_config.second);
}
void NEGEMMLowpOffsetContributionOutputStageKernel::run(const Window &window, const ThreadInfo &info)
{
    ARM_COMPUTE_UNUSED(info);
    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(INEKernel::window(), window);

    PixelValue type_min{};
    PixelValue type_max{};
    std::tie(type_min, type_max) = get_min_max(_output->info()->data_type());
    int32_t type_min_int = type_min.get<int32_t>();
    int32_t type_max_int = type_max.get<int32_t>();

    const bool reinterpret_as_3d = _vector_sum_row != nullptr
                                   && _mm_result->info()->num_dimensions() > 1
                                   && _mm_result->info()->tensor_shape().y() != _vector_sum_row->info()->tensor_shape().x();

    const bool is_bounded_relu = !(_output_stage.gemmlowp_min_bound <= type_min_int && _output_stage.gemmlowp_max_bound >= type_max_int);

    // Check if we need to perform fixed point requantization
    const bool is_fixed_point = _output_stage.type != GEMMLowpOutputStageType::QUANTIZE_DOWN;

    // Check if the output is signed
    const bool is_signed = _output->info()->data_type() == DataType::QASYMM8_SIGNED;

    // Check if symmetric per-channel execution
    const bool is_symm = _output_stage.is_quantized_per_channel;

    if(is_symm)
    {
        run_offset_contribution_output_stage_symm(window, _mm_result, _vector_sum_col, _vector_sum_row, _bias, _output, _a_offset, _b_offset, _k_offset, _slide_vector_sum_col, _output_stage,
                                                  reinterpret_as_3d, is_bounded_relu, is_fixed_point);
    }
    else if(is_signed)
    {
        run_offset_contribution_output_stage<int8_t>(window, _mm_result, _vector_sum_col, _vector_sum_row, _bias, _output, _a_offset, _b_offset, _k_offset, _slide_vector_sum_col, _output_stage,
                                                     reinterpret_as_3d, is_bounded_relu, is_fixed_point);
    }
    else
    {
        run_offset_contribution_output_stage<uint8_t>(window, _mm_result, _vector_sum_col, _vector_sum_row, _bias, _output, _a_offset, _b_offset, _k_offset, _slide_vector_sum_col, _output_stage,
                                                      reinterpret_as_3d, is_bounded_relu, is_fixed_point);
    }
}
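// Minimal usage sketch (illustrative; tensor creation, allocation and the
// GEMM producing mm_result and the vector sums are elided, and the concrete
// values below are assumptions, not taken from this file):
//
//   NEGEMMLowpOffsetContributionOutputStageKernel kernel;
//   GEMMLowpOutputStageInfo stage{};
//   stage.type                = GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT;
//   stage.gemmlowp_offset     = output_zero_point;
//   stage.gemmlowp_multiplier = fixed_point_multiplier;
//   stage.gemmlowp_shift      = result_shift;
//   stage.gemmlowp_min_bound  = 0;   // e.g. full QASYMM8 range
//   stage.gemmlowp_max_bound  = 255;
//   kernel.configure(&mm_result, &vector_sum_col, &vector_sum_row, &bias, &output,
//                    k, a_offset, b_offset, stage);
//   NEScheduler::get().schedule(&kernel, Window::DimY);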