Compute Library 21.02 - elementwise_quantized_list.h
/*
 * Copyright (c) 2021 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef SRC_CORE_NEON_KERNELS_ELEMENTWISE_QUANTIZED_LIST_H
#define SRC_CORE_NEON_KERNELS_ELEMENTWISE_QUANTIZED_LIST_H

#include "src/core/cpu/kernels/elementwise/elementwise_list.h"

namespace arm_compute
{
namespace cpu
{
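// Load 16 QASYMM8 values, subtract the quantization offset and multiply by the scale,
// returning the dequantized values as four float32x4_t registers.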
float32x4x4_t load_quantized(const uint8_t *input1_ptr, const int32x4_t &offset, const float32x4_t &scale)
{
    qasymm8x16_t x = vld1q_u8(input1_ptr);
    const float32x4x4_t out =
    {
        {
            vmulq_f32(vcvtq_f32_s32(vsubq_s32(vreinterpretq_s32_u32(vmovl_u16(vget_low_u16(vmovl_u8(vget_low_u8(x))))), offset)), scale),
            vmulq_f32(vcvtq_f32_s32(vsubq_s32(vreinterpretq_s32_u32(vmovl_u16(vget_high_u16(vmovl_u8(vget_low_u8(x))))), offset)), scale),
            vmulq_f32(vcvtq_f32_s32(vsubq_s32(vreinterpretq_s32_u32(vmovl_u16(vget_low_u16(vmovl_u8(vget_high_u8(x))))), offset)), scale),
            vmulq_f32(vcvtq_f32_s32(vsubq_s32(vreinterpretq_s32_u32(vmovl_u16(vget_high_u16(vmovl_u8(vget_high_u8(x))))), offset)), scale),
        }
    };
    return out;
}

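// Same as load_quantized(), but for 16 QASYMM8_SIGNED (int8_t) values.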
float32x4x4_t load_quantized_signed(const int8_t *input1_ptr, const int32x4_t &offset, const float32x4_t &scale)
{
    qasymm8x16_signed_t x = vld1q_s8(input1_ptr);
    const float32x4x4_t out =
    {
        {
            vmulq_f32(vcvtq_f32_s32(vsubq_s32(vmovl_s16(vget_low_s16(vmovl_s8(vget_low_s8(x)))), offset)), scale),
            vmulq_f32(vcvtq_f32_s32(vsubq_s32(vmovl_s16(vget_high_s16(vmovl_s8(vget_low_s8(x)))), offset)), scale),
            vmulq_f32(vcvtq_f32_s32(vsubq_s32(vmovl_s16(vget_low_s16(vmovl_s8(vget_high_s8(x)))), offset)), scale),
            vmulq_f32(vcvtq_f32_s32(vsubq_s32(vmovl_s16(vget_high_s16(vmovl_s8(vget_high_s8(x)))), offset)), scale),
        }
    };
    return out;
}

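// Store helpers for QASYMM8 outputs: saturate and narrow 16 lanes of 32-bit values to uint8_t.
// The float overload quantizes first, computing offset + value * invscale per lane before
// converting to int32 and narrowing.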
void store_quantized(uint8_t *output_ptr, const uint32x4x4_t &out)
{
    const uint8x8_t pa = vqmovn_u16(vcombine_u16(vqmovn_u32(out.val[0]), vqmovn_u32(out.val[1])));
    const uint8x8_t pb = vqmovn_u16(vcombine_u16(vqmovn_u32(out.val[2]), vqmovn_u32(out.val[3])));
    vst1q_u8(output_ptr, vcombine_u8(pa, pb));
}

void store_quantized(uint8_t *output_ptr, const int32x4x4_t &out)
{
    const uint8x8_t pa = vqmovun_s16(vcombine_s16(vqmovn_s32(out.val[0]), vqmovn_s32(out.val[1])));
    const uint8x8_t pb = vqmovun_s16(vcombine_s16(vqmovn_s32(out.val[2]), vqmovn_s32(out.val[3])));
    vst1q_u8(output_ptr, vcombine_u8(pa, pb));
}

void store_quantized(uint8_t *output_ptr, const float32x4x4_t &rf, const float32x4_t &offset, const float32x4_t &invscale)
{
    int32x4x4_t out =
    {
        {
            vcvtq_s32_f32(vmlaq_f32(offset, rf.val[0], invscale)),
            vcvtq_s32_f32(vmlaq_f32(offset, rf.val[1], invscale)),
            vcvtq_s32_f32(vmlaq_f32(offset, rf.val[2], invscale)),
            vcvtq_s32_f32(vmlaq_f32(offset, rf.val[3], invscale)),
        }
    };
    store_quantized(output_ptr, out);
}

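// QASYMM8_SIGNED counterparts of store_quantized(): saturate to int8_t and store 16 values.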
void store_quantized_signed(int8_t *output_ptr, const int32x4x4_t &out)
{
    const int8x8_t pa = vqmovn_s16(vcombine_s16(vqmovn_s32(out.val[0]), vqmovn_s32(out.val[1])));
    const int8x8_t pb = vqmovn_s16(vcombine_s16(vqmovn_s32(out.val[2]), vqmovn_s32(out.val[3])));
    vst1q_s8(output_ptr, vcombine_s8(pa, pb));
}

void store_quantized_signed(int8_t *output_ptr, const float32x4x4_t &rf, const float32x4_t &offset, const float32x4_t &invscale)
{
    int32x4x4_t out =
    {
        {
            vcvtq_s32_f32(vmlaq_f32(offset, rf.val[0], invscale)),
            vcvtq_s32_f32(vmlaq_f32(offset, rf.val[1], invscale)),
            vcvtq_s32_f32(vmlaq_f32(offset, rf.val[2], invscale)),
            vcvtq_s32_f32(vmlaq_f32(offset, rf.val[3], invscale)),
        }
    };
    store_quantized_signed(output_ptr, out);
}

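// Scalar tail helpers: apply the arithmetic operation to two dequantized floats and
// re-quantize the result with the output quantization info.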
template <ArithmeticOperation op>
inline uint8_t elementwise_arithm_op_quantized_scalar(const float &a, const float &b, UniformQuantizationInfo qinfo)
{
    return quantize_qasymm8(elementwise_arithm_op_scalar<op>(a, b), qinfo);
}

template <ArithmeticOperation op>
inline int8_t elementwise_arithm_op_quantized_signed_scalar(const float &a, const float &b, UniformQuantizationInfo qinfo)
{
    return quantize_qasymm8_signed(elementwise_arithm_op_scalar<op>(a, b), qinfo);
}

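// Apply the arithmetic operation lane by lane to the four float32x4_t registers of the
// dequantized inputs, reusing the generic float vector implementation.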
template <ArithmeticOperation op>
inline float32x4x4_t elementwise_arithm_op(const float32x4x4_t &a, const float32x4x4_t &b)
{
    using neon_vector_float = wrapper::traits::neon_vector<float, 4>;
    float32x4x4_t out =
    {
        {
            elementwise_arithm_op<op, neon_vector_float>(a.val[0], b.val[0]),
            elementwise_arithm_op<op, neon_vector_float>(a.val[1], b.val[1]),
            elementwise_arithm_op<op, neon_vector_float>(a.val[2], b.val[2]),
            elementwise_arithm_op<op, neon_vector_float>(a.val[3], b.val[3]),
        }
    };
    return out;
}

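// Comparison helpers: the scalar version returns the boolean result directly (the output
// quantization info is unused); the vector version compares each float32x4_t lane and
// yields all-ones / all-zeros 32-bit masks.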
template <ComparisonOperation op>
inline uint8_t elementwise_comp_op_quantized_scalar(const float &a, const float &b, UniformQuantizationInfo qinfo)
{
    ARM_COMPUTE_UNUSED(qinfo);
    return elementwise_comp_op_scalar<op>(a, b);
}

template <ComparisonOperation op>
inline uint32x4x4_t elementwise_comp_op(const float32x4x4_t &a, const float32x4x4_t &b)
{
    uint32x4x4_t out =
    {
        {
            elementwise_comp_op<op, float32x4_t, uint32x4_t>(a.val[0], b.val[0]),
            elementwise_comp_op<op, float32x4_t, uint32x4_t>(a.val[1], b.val[1]),
            elementwise_comp_op<op, float32x4_t, uint32x4_t>(a.val[2], b.val[2]),
            elementwise_comp_op<op, float32x4_t, uint32x4_t>(a.val[3], b.val[3])
        }
    };
    return out;
}

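// Vectorized inner loops for arithmetic operations: each iteration dequantizes 16 elements
// from both inputs, applies the operation in float and re-quantizes the result. The returned
// index is where the scalar tail loop must resume.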
template <ArithmeticOperation op>
inline int elementwise_arithm_op_quantized_loop(int window_start_x, int window_end_x, int window_step_x,
                                                const uint8_t *input1_ptr, const uint8_t *input2_ptr, uint8_t *output_ptr,
                                                int32x4_t voffset1, int32x4_t voffset2, float32x4_t vscale1, float32x4_t vscale2,
                                                float32x4_t voffseto, float32x4_t invvscaleo)
{
    int x = window_start_x;
    for(; x <= (window_end_x - window_step_x); x += window_step_x)
    {
        // Get inputs and compute output
        const float32x4x4_t af = load_quantized(input1_ptr + x, voffset1, vscale1);
        const float32x4x4_t bf = load_quantized(input2_ptr + x, voffset2, vscale2);
        const float32x4x4_t rf = elementwise_arithm_op<op>(af, bf);
        store_quantized(output_ptr + x, rf, voffseto, invvscaleo);
    }
    return x;
}

template <ArithmeticOperation op>
inline int elementwise_arithm_op_quantized_signed_loop(int window_start_x, int window_end_x, int window_step_x,
                                                       const int8_t *input1_ptr, const int8_t *input2_ptr, int8_t *output_ptr,
                                                       int32x4_t voffset1, int32x4_t voffset2, float32x4_t vscale1, float32x4_t vscale2,
                                                       float32x4_t voffseto, float32x4_t invvscaleo)
{
    int x = window_start_x;
    for(; x <= (window_end_x - window_step_x); x += window_step_x)
    {
        // Get inputs and compute output
        const float32x4x4_t af = load_quantized_signed(input1_ptr + x, voffset1, vscale1);
        const float32x4x4_t bf = load_quantized_signed(input2_ptr + x, voffset2, vscale2);
        const float32x4x4_t rf = elementwise_arithm_op<op>(af, bf);
        store_quantized_signed(output_ptr + x, rf, voffseto, invvscaleo);
    }
    return x;
}

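// Broadcast variants: one operand is a single value already dequantized and replicated into
// broadcast_vector. 'reorder' restores the original operand order (it is set when input1 is
// the broadcast tensor), which matters for non-commutative operations such as SUB and DIV.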
template <ArithmeticOperation op>
inline int elementwise_arithm_op_quantized_broadcast_loop(int window_start_x, int window_end_x, int window_step_x,
                                                          const uint8_t *non_broadcast_input_ptr, float32x4x4_t broadcast_vector, uint8_t *output_ptr,
                                                          int32x4_t voffset_non_broadcast, float32x4_t vscale_non_broadcast,
                                                          float32x4_t voffseto, float32x4_t invvscaleo, bool reorder)
{
    int x = window_start_x;
    for(; x <= (window_end_x - window_step_x); x += window_step_x)
    {
        const float32x4x4_t af = load_quantized(non_broadcast_input_ptr + x, voffset_non_broadcast, vscale_non_broadcast);
        const float32x4x4_t rf = elementwise_arithm_op<op>(reorder ? broadcast_vector : af, reorder ? af : broadcast_vector);
        store_quantized(output_ptr + x, rf, voffseto, invvscaleo);
    }
    return x;
}

template <ArithmeticOperation op>
inline int elementwise_arithm_op_quantized_signed_broadcast_loop(int window_start_x, int window_end_x, int window_step_x,
                                                                 const int8_t *non_broadcast_input_ptr, float32x4x4_t broadcast_vector, int8_t *output_ptr,
                                                                 int32x4_t voffset_non_broadcast, float32x4_t vscale_non_broadcast,
                                                                 float32x4_t voffseto, float32x4_t invvscaleo, bool reorder)
{
    int x = window_start_x;
    for(; x <= (window_end_x - window_step_x); x += window_step_x)
    {
        const float32x4x4_t af = load_quantized_signed(non_broadcast_input_ptr + x, voffset_non_broadcast, vscale_non_broadcast);
        const float32x4x4_t rf = elementwise_arithm_op<op>(reorder ? broadcast_vector : af, reorder ? af : broadcast_vector);
        store_quantized_signed(output_ptr + x, rf, voffseto, invvscaleo);
    }
    return x;
}

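// Vectorized inner loops for comparisons: the result is a boolean mask, so the output
// quantization parameters are unused and the 32-bit masks are saturated down to U8.
// The signed variants read QASYMM8_SIGNED inputs but still write U8 outputs.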
template <ComparisonOperation op>
inline int elementwise_comp_op_quantized_loop(int window_start_x, int window_end_x, int window_step_x,
                                              const uint8_t *input1_ptr, const uint8_t *input2_ptr, uint8_t *output_ptr,
                                              int32x4_t voffset1, int32x4_t voffset2, float32x4_t vscale1, float32x4_t vscale2,
                                              float32x4_t voffseto, float32x4_t invvscaleo)
{
    ARM_COMPUTE_UNUSED(voffseto, invvscaleo);
    int x = window_start_x;
    for(; x <= (window_end_x - window_step_x); x += window_step_x)
    {
        const float32x4x4_t af = load_quantized(input1_ptr + x, voffset1, vscale1);
        const float32x4x4_t bf = load_quantized(input2_ptr + x, voffset2, vscale2);
        const uint32x4x4_t  rf = elementwise_comp_op<op>(af, bf);
        store_quantized(output_ptr + x, rf);
    }
    return x;
}

template <ComparisonOperation op>
inline int elementwise_comp_op_quantized_signed_loop(int window_start_x, int window_end_x, int window_step_x,
                                                     const int8_t *input1_ptr, const int8_t *input2_ptr, uint8_t *output_ptr,
                                                     int32x4_t voffset1, int32x4_t voffset2, float32x4_t vscale1, float32x4_t vscale2,
                                                     float32x4_t voffseto, float32x4_t invvscaleo)
{
    ARM_COMPUTE_UNUSED(voffseto, invvscaleo);
    int x = window_start_x;
    for(; x <= (window_end_x - window_step_x); x += window_step_x)
    {
        const float32x4x4_t af = load_quantized_signed(input1_ptr + x, voffset1, vscale1);
        const float32x4x4_t bf = load_quantized_signed(input2_ptr + x, voffset2, vscale2);
        const uint32x4x4_t  rf = elementwise_comp_op<op>(af, bf);
        store_quantized(output_ptr + x, rf);
    }
    return x;
}

template <ComparisonOperation op>
inline int elementwise_comp_op_quantized_broadcast_loop(int window_start_x, int window_end_x, int window_step_x,
                                                        const uint8_t *non_broadcast_input_ptr, float32x4x4_t broadcast_vector, uint8_t *output_ptr,
                                                        int32x4_t voffset_non_broadcast, float32x4_t vscale_non_broadcast,
                                                        float32x4_t voffseto, float32x4_t invvscaleo, bool reorder)
{
    ARM_COMPUTE_UNUSED(voffseto, invvscaleo);
    int x = window_start_x;
    for(; x <= (window_end_x - window_step_x); x += window_step_x)
    {
        const float32x4x4_t af = load_quantized(non_broadcast_input_ptr + x, voffset_non_broadcast, vscale_non_broadcast);
        const uint32x4x4_t  rf = elementwise_comp_op<op>(reorder ? broadcast_vector : af, reorder ? af : broadcast_vector);
        store_quantized(output_ptr + x, rf);
    }
    return x;
}

template <ComparisonOperation op>
inline int elementwise_comp_op_quantized_signed_broadcast_loop(int window_start_x, int window_end_x, int window_step_x,
                                                               const int8_t *non_broadcast_input_ptr, float32x4x4_t broadcast_vector, uint8_t *output_ptr,
                                                               int32x4_t voffset_non_broadcast, float32x4_t vscale_non_broadcast,
                                                               float32x4_t voffseto, float32x4_t invvscaleo, bool reorder)
{
    ARM_COMPUTE_UNUSED(voffseto, invvscaleo);
    int x = window_start_x;
    for(; x <= (window_end_x - window_step_x); x += window_step_x)
    {
        const float32x4x4_t af = load_quantized_signed(non_broadcast_input_ptr + x, voffset_non_broadcast, vscale_non_broadcast);
        const uint32x4x4_t  rf = elementwise_comp_op<op>(reorder ? broadcast_vector : af, reorder ? af : broadcast_vector);
        store_quantized(output_ptr + x, rf);
    }
    return x;
}

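// Generic driver for elementwise operations on QASYMM8 tensors. It builds the input and
// output windows (the X dimension is stepped manually, 16 elements at a time), detects
// broadcasting along X, prepares the per-tensor quantization constants, runs the supplied
// vectorized loop and finishes the leftover elements with the scalar callback.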
void elementwise_op_quantized(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window,
                              uint8_t (*scalar_func)(const float &, const float &, UniformQuantizationInfo),
                              int (*broadcast_func)(int, int, int, const uint8_t *, float32x4x4_t, uint8_t *, int32x4_t, float32x4_t,
                                                    float32x4_t, float32x4_t, const bool),
                              int (*neon_func)(int, int, int, const uint8_t *, const uint8_t *, uint8_t *,
                                               int32x4_t, int32x4_t, float32x4_t, float32x4_t,
                                               float32x4_t, float32x4_t))
{
    // Create input windows
    Window input1_win = window.broadcast_if_dimension_le_one(in1->info()->tensor_shape());
    Window input2_win = window.broadcast_if_dimension_le_one(in2->info()->tensor_shape());

    // Clear X Dimension on execution window as we handle manually
    Window win = window;
    win.set(Window::DimX, Window::Dimension(0, 1, 1));

    const int  window_step_x         = 16;
    const auto window_start_x        = static_cast<int>(window.x().start());
    const auto window_end_x          = static_cast<int>(window.x().end());
    const bool is_broadcast_across_x = in1->info()->tensor_shape().x() != in2->info()->tensor_shape().x();

    const UniformQuantizationInfo output_qinfo = out->info()->quantization_info().uniform();

    // Output quantization info (add 0.5 to round toward the nearest integer - 0.5 rounds away from zero)
    const float32x4_t voffseto   = vdupq_n_f32(output_qinfo.offset + 0.5f);
    const float32x4_t invvscaleo = vdupq_n_f32(1.f / output_qinfo.scale);

    if(is_broadcast_across_x)
    {
        // Select the broadcast input on the X axis
        const bool     is_broadcast_input_2 = input2_win.x().step() == 0;
        Window         broadcast_win        = is_broadcast_input_2 ? input2_win : input1_win;
        Window         non_broadcast_win    = !is_broadcast_input_2 ? input2_win : input1_win;
        const ITensor *broadcast_tensor     = is_broadcast_input_2 ? in2 : in1;
        const ITensor *non_broadcast_tensor = !is_broadcast_input_2 ? in2 : in1;

        const UniformQuantizationInfo broadcast_qinfo     = broadcast_tensor->info()->quantization_info().uniform();
        const UniformQuantizationInfo non_broadcast_qinfo = non_broadcast_tensor->info()->quantization_info().uniform();

        const int32x4_t   voffset_non_broadcast = vdupq_n_s32(non_broadcast_qinfo.offset);
        const float32x4_t vscale_non_broadcast  = vdupq_n_f32(non_broadcast_qinfo.scale);

        // Clear X Dimension on execution window as we handle manually
        non_broadcast_win.set(Window::DimX, Window::Dimension(0, 1, 1));

        Iterator broadcast_input(broadcast_tensor, broadcast_win);
        Iterator non_broadcast_input(non_broadcast_tensor, non_broadcast_win);
        Iterator output(out, win);

        execute_window_loop(win, [&](const Coordinates &)
        {
            const auto non_broadcast_input_ptr = reinterpret_cast<const uint8_t *>(non_broadcast_input.ptr());
            const auto output_ptr              = reinterpret_cast<uint8_t *>(output.ptr());

            const uint8_t       broadcast_value  = *reinterpret_cast<const uint8_t *>(broadcast_input.ptr());
            const float32x4x4_t broadcast_vector = vdequantize(vdupq_n_u8(broadcast_value), broadcast_qinfo);

            int x = (*broadcast_func)(window_start_x, window_end_x, window_step_x, non_broadcast_input_ptr, broadcast_vector, output_ptr,
                                      voffset_non_broadcast, vscale_non_broadcast, voffseto, invvscaleo, !is_broadcast_input_2);
            for(; x < window_end_x; ++x)
            {
                const float afs   = dequantize_qasymm8(*(non_broadcast_input_ptr + x), non_broadcast_qinfo);
                const float bfs   = dequantize_qasymm8(broadcast_value, broadcast_qinfo);
                *(output_ptr + x) = (*scalar_func)(!is_broadcast_input_2 ? bfs : afs, !is_broadcast_input_2 ? afs : bfs, output_qinfo);
            }
        },
        broadcast_input, non_broadcast_input, output);
    }
    else
    {
        const UniformQuantizationInfo input1_qinfo = in1->info()->quantization_info().uniform();
        const UniformQuantizationInfo input2_qinfo = in2->info()->quantization_info().uniform();

        // Input1 quantization info
        const int32x4_t   voffset1 = vdupq_n_s32(input1_qinfo.offset);
        const float32x4_t vscale1  = vdupq_n_f32(input1_qinfo.scale);

        // Input2 quantization info
        const int32x4_t   voffset2 = vdupq_n_s32(input2_qinfo.offset);
        const float32x4_t vscale2  = vdupq_n_f32(input2_qinfo.scale);

        // Clear X Dimension on execution window as we handle manually
        input1_win.set(Window::DimX, Window::Dimension(0, 1, 1));
        input2_win.set(Window::DimX, Window::Dimension(0, 1, 1));

        Iterator input1(in1, input1_win);
        Iterator input2(in2, input2_win);
        Iterator output(out, win);

        execute_window_loop(win, [&](const Coordinates &)
        {
            const auto input1_ptr = reinterpret_cast<const uint8_t *>(input1.ptr());
            const auto input2_ptr = reinterpret_cast<const uint8_t *>(input2.ptr());
            const auto output_ptr = reinterpret_cast<uint8_t *>(output.ptr());

            int x = (*neon_func)(window_start_x, window_end_x, window_step_x, input1_ptr, input2_ptr, output_ptr, voffset1, voffset2,
                                 vscale1, vscale2, voffseto, invvscaleo);
            for(; x < window_end_x; ++x)
            {
                const float afs   = dequantize_qasymm8(*(input1_ptr + x), input1_qinfo);
                const float bfs   = dequantize_qasymm8(*(input2_ptr + x), input2_qinfo);
                *(output_ptr + x) = (*scalar_func)(afs, bfs, output_qinfo);
            }
        },
        input1, input2, output);
    }
}

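// Driver for comparisons on QASYMM8_SIGNED inputs: same structure as elementwise_op_quantized(),
// but it reads int8_t data and writes the U8 boolean result, so no rounding offset is added to
// the output offset.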
void elementwise_comp_quantized_signed(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window,
                                       uint8_t (*scalar_func)(const float &, const float &, UniformQuantizationInfo),
                                       int (*broadcast_func)(int, int, int, const int8_t *, float32x4x4_t, uint8_t *, int32x4_t, float32x4_t,
                                                             float32x4_t, float32x4_t, const bool),
                                       int (*neon_func)(int, int, int, const int8_t *, const int8_t *, uint8_t *,
                                                        int32x4_t, int32x4_t, float32x4_t, float32x4_t,
                                                        float32x4_t, float32x4_t))
{
    // Create input windows
    Window input1_win = window.broadcast_if_dimension_le_one(in1->info()->tensor_shape());
    Window input2_win = window.broadcast_if_dimension_le_one(in2->info()->tensor_shape());

    // Clear X Dimension on execution window as we handle manually
    Window win = window;
    win.set(Window::DimX, Window::Dimension(0, 1, 1));

    const int  window_step_x         = 16;
    const auto window_start_x        = static_cast<int>(window.x().start());
    const auto window_end_x          = static_cast<int>(window.x().end());
    const bool is_broadcast_across_x = in1->info()->tensor_shape().x() != in2->info()->tensor_shape().x();

    const UniformQuantizationInfo output_qinfo = out->info()->quantization_info().uniform();

    const float32x4_t voffseto   = vdupq_n_f32(output_qinfo.offset);
    const float32x4_t invvscaleo = vdupq_n_f32(1.f / output_qinfo.scale);

    if(is_broadcast_across_x)
    {
        // Select the broadcast input on the X axis
        const bool     is_broadcast_input_2 = input2_win.x().step() == 0;
        Window         broadcast_win        = is_broadcast_input_2 ? input2_win : input1_win;
        Window         non_broadcast_win    = !is_broadcast_input_2 ? input2_win : input1_win;
        const ITensor *broadcast_tensor     = is_broadcast_input_2 ? in2 : in1;
        const ITensor *non_broadcast_tensor = !is_broadcast_input_2 ? in2 : in1;

        const UniformQuantizationInfo broadcast_qinfo     = broadcast_tensor->info()->quantization_info().uniform();
        const UniformQuantizationInfo non_broadcast_qinfo = non_broadcast_tensor->info()->quantization_info().uniform();

        const int32x4_t   voffset_non_broadcast = vdupq_n_s32(non_broadcast_qinfo.offset);
        const float32x4_t vscale_non_broadcast  = vdupq_n_f32(non_broadcast_qinfo.scale);

        // Clear X Dimension on execution window as we handle manually
        non_broadcast_win.set(Window::DimX, Window::Dimension(0, 1, 1));

        Iterator broadcast_input(broadcast_tensor, broadcast_win);
        Iterator non_broadcast_input(non_broadcast_tensor, non_broadcast_win);
        Iterator output(out, win);

        execute_window_loop(win, [&](const Coordinates &)
        {
            const auto non_broadcast_input_ptr = reinterpret_cast<const int8_t *>(non_broadcast_input.ptr());
            const auto output_ptr              = reinterpret_cast<uint8_t *>(output.ptr());

            const int8_t        broadcast_value  = *reinterpret_cast<const int8_t *>(broadcast_input.ptr());
            const float32x4x4_t broadcast_vector = vdequantize(vdupq_n_s8(broadcast_value), broadcast_qinfo);

            int x = (*broadcast_func)(window_start_x, window_end_x, window_step_x, non_broadcast_input_ptr, broadcast_vector, output_ptr,
                                      voffset_non_broadcast, vscale_non_broadcast, voffseto, invvscaleo, !is_broadcast_input_2);
            for(; x < window_end_x; ++x)
            {
                const float afs   = dequantize_qasymm8_signed(*(non_broadcast_input_ptr + x), non_broadcast_qinfo);
                const float bfs   = dequantize_qasymm8_signed(broadcast_value, broadcast_qinfo);
                *(output_ptr + x) = (*scalar_func)(!is_broadcast_input_2 ? bfs : afs, !is_broadcast_input_2 ? afs : bfs, output_qinfo);
            }
        },
        broadcast_input, non_broadcast_input, output);
    }
    else
    {
        const UniformQuantizationInfo input1_qinfo = in1->info()->quantization_info().uniform();
        const UniformQuantizationInfo input2_qinfo = in2->info()->quantization_info().uniform();

        // Input1 quantization info
        const int32x4_t   voffset1 = vdupq_n_s32(input1_qinfo.offset);
        const float32x4_t vscale1  = vdupq_n_f32(input1_qinfo.scale);

        // Input2 quantization info
        const int32x4_t   voffset2 = vdupq_n_s32(input2_qinfo.offset);
        const float32x4_t vscale2  = vdupq_n_f32(input2_qinfo.scale);

        // Clear X Dimension on execution window as we handle manually
        input1_win.set(Window::DimX, Window::Dimension(0, 1, 1));
        input2_win.set(Window::DimX, Window::Dimension(0, 1, 1));

        Iterator input1(in1, input1_win);
        Iterator input2(in2, input2_win);
        Iterator output(out, win);

        execute_window_loop(win, [&](const Coordinates &)
        {
            const auto input1_ptr = reinterpret_cast<const int8_t *>(input1.ptr());
            const auto input2_ptr = reinterpret_cast<const int8_t *>(input2.ptr());
            const auto output_ptr = reinterpret_cast<uint8_t *>(output.ptr());

            int x = (*neon_func)(window_start_x, window_end_x, window_step_x, input1_ptr, input2_ptr, output_ptr, voffset1, voffset2,
                                 vscale1, vscale2, voffseto, invvscaleo);
            for(; x < window_end_x; ++x)
            {
                const float afs   = dequantize_qasymm8_signed(*(input1_ptr + x), input1_qinfo);
                const float bfs   = dequantize_qasymm8_signed(*(input2_ptr + x), input2_qinfo);
                *(output_ptr + x) = (*scalar_func)(afs, bfs, output_qinfo);
            }
        },
        input1, input2, output);
    }
}

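// Driver for arithmetic operations on QASYMM8_SIGNED tensors: identical structure to
// elementwise_op_quantized(), with int8_t inputs and output.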
void elementwise_op_quantized_signed(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window,
                                     int8_t (*scalar_func)(const float &, const float &, UniformQuantizationInfo),
                                     int (*broadcast_func)(int, int, int, const int8_t *, float32x4x4_t, int8_t *, int32x4_t, float32x4_t,
                                                           float32x4_t, float32x4_t, const bool),
                                     int (*neon_func)(int, int, int, const int8_t *, const int8_t *, int8_t *,
                                                      int32x4_t, int32x4_t, float32x4_t, float32x4_t,
                                                      float32x4_t, float32x4_t))
{
    // Create input windows
    Window input1_win = window.broadcast_if_dimension_le_one(in1->info()->tensor_shape());
    Window input2_win = window.broadcast_if_dimension_le_one(in2->info()->tensor_shape());

    // Clear X Dimension on execution window as we handle manually
    Window win = window;
    win.set(Window::DimX, Window::Dimension(0, 1, 1));

    const int  window_step_x         = 16;
    const auto window_start_x        = static_cast<int>(window.x().start());
    const auto window_end_x          = static_cast<int>(window.x().end());
    const bool is_broadcast_across_x = in1->info()->tensor_shape().x() != in2->info()->tensor_shape().x();

    const UniformQuantizationInfo output_qinfo = out->info()->quantization_info().uniform();

    const float32x4_t voffseto   = vdupq_n_f32(output_qinfo.offset);
    const float32x4_t invvscaleo = vdupq_n_f32(1.f / output_qinfo.scale);

    if(is_broadcast_across_x)
    {
        // Select the broadcast input on the X axis
        const bool     is_broadcast_input_2 = input2_win.x().step() == 0;
        Window         broadcast_win        = is_broadcast_input_2 ? input2_win : input1_win;
        Window         non_broadcast_win    = !is_broadcast_input_2 ? input2_win : input1_win;
        const ITensor *broadcast_tensor     = is_broadcast_input_2 ? in2 : in1;
        const ITensor *non_broadcast_tensor = !is_broadcast_input_2 ? in2 : in1;

        const UniformQuantizationInfo broadcast_qinfo     = broadcast_tensor->info()->quantization_info().uniform();
        const UniformQuantizationInfo non_broadcast_qinfo = non_broadcast_tensor->info()->quantization_info().uniform();

        const int32x4_t   voffset_non_broadcast = vdupq_n_s32(non_broadcast_qinfo.offset);
        const float32x4_t vscale_non_broadcast  = vdupq_n_f32(non_broadcast_qinfo.scale);

        // Clear X Dimension on execution window as we handle manually
        non_broadcast_win.set(Window::DimX, Window::Dimension(0, 1, 1));

        Iterator broadcast_input(broadcast_tensor, broadcast_win);
        Iterator non_broadcast_input(non_broadcast_tensor, non_broadcast_win);
        Iterator output(out, win);

        execute_window_loop(win, [&](const Coordinates &)
        {
            const auto non_broadcast_input_ptr = reinterpret_cast<const int8_t *>(non_broadcast_input.ptr());
            const auto output_ptr              = reinterpret_cast<int8_t *>(output.ptr());

            const int8_t        broadcast_value  = *reinterpret_cast<const int8_t *>(broadcast_input.ptr());
            const float32x4x4_t broadcast_vector = vdequantize(vdupq_n_s8(broadcast_value), broadcast_qinfo);

            int x = (*broadcast_func)(window_start_x, window_end_x, window_step_x, non_broadcast_input_ptr, broadcast_vector, output_ptr,
                                      voffset_non_broadcast, vscale_non_broadcast, voffseto, invvscaleo, !is_broadcast_input_2);
            for(; x < window_end_x; ++x)
            {
                const float afs   = dequantize_qasymm8_signed(*(non_broadcast_input_ptr + x), non_broadcast_qinfo);
                const float bfs   = dequantize_qasymm8_signed(broadcast_value, broadcast_qinfo);
                *(output_ptr + x) = (*scalar_func)(!is_broadcast_input_2 ? bfs : afs, !is_broadcast_input_2 ? afs : bfs, output_qinfo);
            }
        },
        broadcast_input, non_broadcast_input, output);
    }
    else
    {
        const UniformQuantizationInfo input1_qinfo = in1->info()->quantization_info().uniform();
        const UniformQuantizationInfo input2_qinfo = in2->info()->quantization_info().uniform();

        // Input1 quantization info
        const int32x4_t   voffset1 = vdupq_n_s32(input1_qinfo.offset);
        const float32x4_t vscale1  = vdupq_n_f32(input1_qinfo.scale);

        // Input2 quantization info
        const int32x4_t   voffset2 = vdupq_n_s32(input2_qinfo.offset);
        const float32x4_t vscale2  = vdupq_n_f32(input2_qinfo.scale);

        // Clear X Dimension on execution window as we handle manually
        input1_win.set(Window::DimX, Window::Dimension(0, 1, 1));
        input2_win.set(Window::DimX, Window::Dimension(0, 1, 1));

        Iterator input1(in1, input1_win);
        Iterator input2(in2, input2_win);
        Iterator output(out, win);

        execute_window_loop(win, [&](const Coordinates &)
        {
            const auto input1_ptr = reinterpret_cast<const int8_t *>(input1.ptr());
            const auto input2_ptr = reinterpret_cast<const int8_t *>(input2.ptr());
            const auto output_ptr = reinterpret_cast<int8_t *>(output.ptr());

            int x = (*neon_func)(window_start_x, window_end_x, window_step_x, input1_ptr, input2_ptr, output_ptr, voffset1, voffset2,
                                 vscale1, vscale2, voffseto, invvscaleo);
            for(; x < window_end_x; ++x)
            {
                const float afs   = dequantize_qasymm8_signed(*(input1_ptr + x), input1_qinfo);
                const float bfs   = dequantize_qasymm8_signed(*(input2_ptr + x), input2_qinfo);
                *(output_ptr + x) = (*scalar_func)(afs, bfs, output_qinfo);
            }
        },
        input1, input2, output);
    }
}

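// Operation-templated entry points that bind the matching scalar, broadcast and vectorized
// loops to the drivers above; these are what the CPU elementwise kernels instantiate.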
template <ArithmeticOperation op>
void elementwise_arithm_op_quantized(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window)
{
    elementwise_op_quantized(in1, in2, out, window, &elementwise_arithm_op_quantized_scalar<op>,
                             &elementwise_arithm_op_quantized_broadcast_loop<op>,
                             &elementwise_arithm_op_quantized_loop<op>);
}

template <ArithmeticOperation op>
void elementwise_arithm_op_quantized_signed(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window)
{
    elementwise_op_quantized_signed(in1, in2, out, window, &elementwise_arithm_op_quantized_signed_scalar<op>,
                                    &elementwise_arithm_op_quantized_signed_broadcast_loop<op>,
                                    &elementwise_arithm_op_quantized_signed_loop<op>);
}

template <ComparisonOperation op>
void elementwise_comp_op_quantized(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window)
{
    elementwise_op_quantized(in1, in2, out, window, &elementwise_comp_op_quantized_scalar<op>,
                             &elementwise_comp_op_quantized_broadcast_loop<op>,
                             &elementwise_comp_op_quantized_loop<op>);
}

template <ComparisonOperation op>
void elementwise_comp_op_quantized_signed(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window)
{
    elementwise_comp_quantized_signed(in1, in2, out, window, &elementwise_comp_op_quantized_scalar<op>,
                                      &elementwise_comp_op_quantized_signed_broadcast_loop<op>,
                                      &elementwise_comp_op_quantized_signed_loop<op>);
}
} // namespace cpu
} // namespace arm_compute

#endif /* SRC_CORE_NEON_KERNELS_ELEMENTWISE_QUANTIZED_LIST_H */