Compute Library 21.11 — NEReductionOperationKernel.cpp
/*
 * Copyright (c) 2017-2021 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "src/core/NEON/kernels/NEReductionOperationKernel.h"

#include "arm_compute/core/Coordinates.h"
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/ITensor.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Utils.h"
#include "arm_compute/core/Validate.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "src/core/CPP/Validate.h"
#include "src/core/NEON/INEKernel.h"
#include "src/core/NEON/NEMath.h"
#include "src/core/helpers/AutoConfiguration.h"
#include "src/core/helpers/WindowHelpers.h"
#include "support/SaturateCast.h"

#include "src/core/NEON/wrapper/wrapper.h"
#include <arm_neon.h>

namespace arm_compute
{
namespace
{
// Helper function that calls vqmovun/vqmovn, vcombine and vstore, allows templating of RedOpYZW_quantized
template <typename T>
void combine_and_store(int16x8_t t1, int16x8_t t2, Iterator &output, int offset = 0)
{
    if(std::is_same<T, uint8_t>::value)
    {
        auto res = wrapper::vcombine(wrapper::vqmovun(t1), wrapper::vqmovun(t2));
        wrapper::vstore(output.ptr() + offset, res);
    }
    else
    {
        auto res = wrapper::vcombine(wrapper::vqmovn(t1), wrapper::vqmovn(t2));
        wrapper::vstore(reinterpret_cast<int8_t *>(output.ptr() + offset), res);
    }
}
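
// Usage sketch (illustrative, not part of the kernel): narrowing two
// accumulated int16x8_t halves into one 16-byte quantized store. For
// T = uint8_t the saturating narrow is vqmovun (signed to unsigned), for
// T = int8_t it is vqmovn (signed to signed). "output" is assumed to be an
// Iterator over a QASYMM8 tensor:
//
//   int16x8_t lo = vdupq_n_s16(300); // saturates to 255 as uint8_t
//   int16x8_t hi = vdupq_n_s16(-5);  // saturates to 0 as uint8_t
//   combine_and_store<uint8_t>(lo, hi, output);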

template <typename T>
uint32x4x4_t calculate_index(uint32_t idx, T a, T b, uint32x4x4_t c, ReductionOperation op, int axis)
{
    uint32x4_t mask{ 0 };
    if(op == ReductionOperation::ARG_IDX_MIN)
    {
        mask = wrapper::vcgt(b, a);
    }
    else
    {
        mask = wrapper::vclt(b, a);
    }

    uint32x4_t vec_idx = { idx, idx + 1, idx + 2, idx + 3 };
    if(axis != 0)
    {
        vec_idx = wrapper::vdup_n(idx, wrapper::traits::vector_128_tag{});
    }
    uint32x4x4_t res = { { wrapper::vbsl(mask, vec_idx, c.val[0]), 0, 0, 0 } };

    return res;
}
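
// Worked example (illustrative): with op == ARG_IDX_MIN, a holds the new
// running minima and b the previous ones; vcgt(b, a) is all-ones exactly in
// the lanes where the candidate won, so vbsl picks the fresh index from
// vec_idx there and keeps the old index from c everywhere else:
//
//   float32x4_t prev = { 3.f, 1.f, 4.f, 1.f };  // previous minima
//   float32x4_t curr = { 2.f, 5.f, 4.f, 0.f };  // candidates at idx = 8
//   uint32x4x4_t old_idx{};                     // previous best indices
//   auto next = calculate_index<float32x4_t>(8, wrapper::vmin(curr, prev), prev,
//                                            old_idx, ReductionOperation::ARG_IDX_MIN, 0);
//   // next.val[0] = { 8, old, old, 11 }: updated only where curr < prev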

template <typename T>
uint32x4x4_t calculate_index_quantized(uint32_t idx, T a, T b, uint32x4x4_t c, ReductionOperation op, int axis)
{
    uint32x4x4_t mask{ { 0 } };
    uint8x16_t mask_u8{ 0 };
    if(op == ReductionOperation::ARG_IDX_MIN)
    {
        mask_u8 = wrapper::vcgt(b, a);
    }
    else
    {
        mask_u8 = wrapper::vclt(b, a);
    }
    auto wide_u16_1 = wrapper::vorr(vshll_n_u8(wrapper::vgetlow(mask_u8), 8), wrapper::vmovl(wrapper::vgetlow(mask_u8)));
    auto wide_u16_2 = wrapper::vorr(vshll_n_u8(wrapper::vgethigh(mask_u8), 8), wrapper::vmovl(wrapper::vgethigh(mask_u8)));
    mask.val[0] = wrapper::vorr(vshll_n_u16(wrapper::vgetlow(wide_u16_1), 16), wrapper::vmovl(wrapper::vgetlow(wide_u16_1)));
    mask.val[1] = wrapper::vorr(vshll_n_u16(wrapper::vgethigh(wide_u16_1), 16), wrapper::vmovl(wrapper::vgethigh(wide_u16_1)));
    mask.val[2] = wrapper::vorr(vshll_n_u16(wrapper::vgetlow(wide_u16_2), 16), wrapper::vmovl(wrapper::vgetlow(wide_u16_2)));
    mask.val[3] = wrapper::vorr(vshll_n_u16(wrapper::vgethigh(wide_u16_2), 16), wrapper::vmovl(wrapper::vgethigh(wide_u16_2)));

    uint32x4x4_t vec_idx = { { { idx + 0, idx + 1, idx + 2, idx + 3 },
                               { idx + 4, idx + 5, idx + 6, idx + 7 },
                               { idx + 8, idx + 9, idx + 10, idx + 11 },
                               { idx + 12, idx + 13, idx + 14, idx + 15 }
                             }
                           };
    if(axis != 0)
    {
        vec_idx.val[0] = wrapper::vdup_n(idx, wrapper::traits::vector_128_tag{});
        vec_idx.val[1] = wrapper::vdup_n(idx, wrapper::traits::vector_128_tag{});
        vec_idx.val[2] = wrapper::vdup_n(idx, wrapper::traits::vector_128_tag{});
        vec_idx.val[3] = wrapper::vdup_n(idx, wrapper::traits::vector_128_tag{});
    }
    uint32x4x4_t res =
    {
        {
            vbslq_u32(mask.val[0], vec_idx.val[0], c.val[0]),
            vbslq_u32(mask.val[1], vec_idx.val[1], c.val[1]),
            vbslq_u32(mask.val[2], vec_idx.val[2], c.val[2]),
            vbslq_u32(mask.val[3], vec_idx.val[3], c.val[3])
        }
    };

    return res;
}

// Helper function to calculate the minimum value of the input vector. All the elements in the output vector contain the min value.
template <typename T>
inline typename std::enable_if < std::is_same<T, float32x4_t>::value || std::is_same<T, int32x4_t>::value,
       typename std::conditional<std::is_same<T, float32x4_t>::value, float32x2_t, int32x2_t>::type >::type
       calculate_min(T in)
{
    auto pmin = wrapper::vpmin(wrapper::vgethigh(in), wrapper::vgetlow(in));
    return wrapper::vpmin(pmin, pmin);
}

// Helper function to calculate the minimum value of the input vector. All the elements in the output vector contain the min value.
template <typename T>
inline typename std::enable_if < std::is_same<T, uint8x16_t>::value || std::is_same<T, int8x16_t>::value,
       typename std::conditional<std::is_same<T, uint8x16_t>::value, uint8x8_t, int8x8_t>::type >::type
       calculate_min(T in)
{
    auto pmin = wrapper::vpmin(wrapper::vgethigh(in), wrapper::vgetlow(in));
    pmin = wrapper::vpmin(pmin, pmin);
    pmin = wrapper::vpmin(pmin, pmin);
    return wrapper::vpmin(pmin, pmin);
}

// Helper function to calculate the maximum value of the input vector. All the elements in the output vector contain the max value.
template <typename T>
inline typename std::enable_if < std::is_same<T, float32x4_t>::value || std::is_same<T, int32x4_t>::value,
       typename std::conditional<std::is_same<T, float32x4_t>::value, float32x2_t, int32x2_t>::type >::type
       calculate_max(T in)
{
    auto pmax = wrapper::vpmax(wrapper::vgethigh(in), wrapper::vgetlow(in));
    return wrapper::vpmax(pmax, pmax);
}

// Helper function to calculate the maximum value of the input vector. All the elements in the output vector contain the max value.
template <typename T>
inline typename std::enable_if < std::is_same<T, uint8x16_t>::value || std::is_same<T, int8x16_t>::value,
       typename std::conditional<std::is_same<T, uint8x16_t>::value, uint8x8_t, int8x8_t>::type >::type
       calculate_max(T in)
{
    auto pmax = wrapper::vpmax(wrapper::vgethigh(in), wrapper::vgetlow(in));
    pmax = wrapper::vpmax(pmax, pmax);
    pmax = wrapper::vpmax(pmax, pmax);
    return wrapper::vpmax(pmax, pmax);
}

template <typename T>
uint32_t calculate_vector_index(uint32x4x4_t vec_res_idx, T vec_res_value, ReductionOperation op)
{
    uint32x4_t res_idx_mask{ 0 };
    uint32x4_t mask_ones = vdupq_n_u32(0xFFFFFFFF);

    if(op == ReductionOperation::ARG_IDX_MIN)
    {
        auto pmin = calculate_min(vec_res_value);
        auto mask = wrapper::vceq(vec_res_value, wrapper::vcombine(pmin, pmin));
        res_idx_mask = wrapper::vand(vec_res_idx.val[0], mask);
    }
    else
    {
        auto pmax = calculate_max(vec_res_value);
        auto mask = wrapper::vceq(vec_res_value, wrapper::vcombine(pmax, pmax));
        res_idx_mask = wrapper::vand(vec_res_idx.val[0], mask);
    }

    res_idx_mask = wrapper::vadd(res_idx_mask, mask_ones);
    auto pmin = wrapper::vpmin(wrapper::vgethigh(res_idx_mask), wrapper::vgetlow(res_idx_mask));
    pmin = wrapper::vpmin(pmin, pmin);
    uint32_t res = wrapper::vgetlane(pmin, 0);

    return (res - 0xFFFFFFFF);
}
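
// Note on the bias arithmetic above (illustrative): non-matching lanes are
// ANDed to 0, and adding 0xFFFFFFFF turns them into 0xFFFFFFFF, while a
// surviving index i becomes i - 1 modulo 2^32. The pairwise minimum then
// selects the smallest matching index, and the final "res - 0xFFFFFFFF"
// (equivalent to res + 1 modulo 2^32) undoes the bias:
//
//   matching lane, index 5:  (5 & 0xFFFFFFFF) + 0xFFFFFFFF == 4,  4 + 1 == 5
//   non-matching lane:       (i & 0x00000000) + 0xFFFFFFFF == 0xFFFFFFFF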

template <typename T>
uint32_t calculate_vector_index_quantized(uint32x4x4_t vec_res_idx, T vec_res_value, ReductionOperation op)
{
    uint32x4x4_t res_idx_mask{ { 0 } };
    uint32x4_t mask_ones = vdupq_n_u32(0xFFFFFFFF);
    uint8x16_t mask_u8{ 0 };
    if(op == ReductionOperation::ARG_IDX_MIN)
    {
        auto pmin = calculate_min(vec_res_value);
        mask_u8 = wrapper::vceq(vec_res_value, wrapper::vcombine(pmin, pmin));
    }
    else
    {
        auto pmax = calculate_max(vec_res_value);
        mask_u8 = wrapper::vceq(vec_res_value, wrapper::vcombine(pmax, pmax));
    }

    // Widen vectors
    auto wide_u16_1 = wrapper::vorr(vshll_n_u8(wrapper::vgetlow(mask_u8), 8), wrapper::vmovl(wrapper::vgetlow(mask_u8)));
    auto wide_u16_2 = wrapper::vorr(vshll_n_u8(wrapper::vgethigh(mask_u8), 8), wrapper::vmovl(wrapper::vgethigh(mask_u8)));
    auto wide_u32_1 = wrapper::vorr(vshll_n_u16(wrapper::vgetlow(wide_u16_1), 16), wrapper::vmovl(wrapper::vgetlow(wide_u16_1)));
    auto wide_u32_2 = wrapper::vorr(vshll_n_u16(wrapper::vgethigh(wide_u16_1), 16), wrapper::vmovl(wrapper::vgethigh(wide_u16_1)));
    auto wide_u32_3 = wrapper::vorr(vshll_n_u16(wrapper::vgetlow(wide_u16_2), 16), wrapper::vmovl(wrapper::vgetlow(wide_u16_2)));
    auto wide_u32_4 = wrapper::vorr(vshll_n_u16(wrapper::vgethigh(wide_u16_2), 16), wrapper::vmovl(wrapper::vgethigh(wide_u16_2)));
    res_idx_mask.val[0] = wrapper::vand(vec_res_idx.val[0], wide_u32_1);
    res_idx_mask.val[1] = wrapper::vand(vec_res_idx.val[1], wide_u32_2);
    res_idx_mask.val[2] = wrapper::vand(vec_res_idx.val[2], wide_u32_3);
    res_idx_mask.val[3] = wrapper::vand(vec_res_idx.val[3], wide_u32_4);
    res_idx_mask.val[0] = wrapper::vadd(res_idx_mask.val[0], mask_ones);
    res_idx_mask.val[1] = wrapper::vadd(res_idx_mask.val[1], mask_ones);
    res_idx_mask.val[2] = wrapper::vadd(res_idx_mask.val[2], mask_ones);
    res_idx_mask.val[3] = wrapper::vadd(res_idx_mask.val[3], mask_ones);

    uint32_t res = 0xFFFFFFFF;
    int iter = 0;
    do
    {
        auto pmin = wrapper::vpmin(wrapper::vgethigh(res_idx_mask.val[iter]), wrapper::vgetlow(res_idx_mask.val[iter]));
        pmin = wrapper::vpmin(pmin, pmin);
        res = std::min(wrapper::vgetlane(pmin, 0), res);
        iter++;
    }
    while(iter < 4);

    return (res - 0xFFFFFFFF);
}

#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
template <>
uint32x4x4_t calculate_index(uint32_t idx, float16x8_t a, float16x8_t b, uint32x4x4_t c, ReductionOperation op, int axis)
{
    uint32x4x2_t mask{ 0 };
    uint16x8_t mask_u16{ 0 };
    if(op == ReductionOperation::ARG_IDX_MIN)
    {
        mask_u16 = wrapper::vcgt(b, a);
    }
    else
    {
        mask_u16 = wrapper::vclt(b, a);
    }
    mask.val[0] = wrapper::vmovl(wrapper::vgetlow(mask_u16));
    mask.val[1] = wrapper::vmovl(wrapper::vgethigh(mask_u16));
    uint32x4x2_t vec_idx = { { { idx + 0, idx + 1, idx + 2, idx + 3 },
                               { idx + 4, idx + 5, idx + 6, idx + 7 }
                             }
                           };
    if(axis != 0)
    {
        vec_idx.val[0] = wrapper::vdup_n(idx, wrapper::traits::vector_128_tag{});
        vec_idx.val[1] = wrapper::vdup_n(idx, wrapper::traits::vector_128_tag{});
    }
    uint32x4x4_t res = { wrapper::vbsl(mask.val[0], vec_idx.val[0], c.val[0]),
                         wrapper::vbsl(mask.val[1], vec_idx.val[1], c.val[1]),
                         0, 0
                       };

    return res;
}

// Helper function to calculate the minimum value of the input vector. All the elements in the output vector contain the min value.
inline float16x4_t calculate_min(float16x8_t in)
{
    auto pmin = wrapper::vpmin(wrapper::vgethigh(in), wrapper::vgetlow(in));
    pmin = wrapper::vpmin(pmin, pmin);
    return wrapper::vpmin(pmin, pmin);
}
// Helper function to calculate the maximum value of the input vector. All the elements in the output vector contain the max value.
inline float16x4_t calculate_max(float16x8_t in)
{
    auto pmax = wrapper::vpmax(wrapper::vgethigh(in), wrapper::vgetlow(in));
    pmax = wrapper::vpmax(pmax, pmax);
    return wrapper::vpmax(pmax, pmax);
}

template <>
uint32_t calculate_vector_index(uint32x4x4_t vec_res_idx, float16x8_t vec_res_value, ReductionOperation op)
{
    uint32x4x2_t res_idx_mask{ 0 };
    uint32x4_t mask_ones = vdupq_n_u32(0xFFFFFFFF);
    uint16x8_t mask_u16;
    if(op == ReductionOperation::ARG_IDX_MIN)
    {
        auto pmin = calculate_min(vec_res_value);
        mask_u16 = wrapper::vceq(vec_res_value, wrapper::vcombine(pmin, pmin));
    }
    else
    {
        auto pmax = calculate_max(vec_res_value);
        mask_u16 = wrapper::vceq(vec_res_value, wrapper::vcombine(pmax, pmax));
    }

    // Widen vectors
    auto wide_u32_1 = wrapper::vorr(vshll_n_u16(wrapper::vgetlow(mask_u16), 8), wrapper::vmovl(wrapper::vgetlow(mask_u16)));
    auto wide_u32_2 = wrapper::vorr(vshll_n_u16(wrapper::vgethigh(mask_u16), 8), wrapper::vmovl(wrapper::vgethigh(mask_u16)));
    res_idx_mask.val[0] = wrapper::vand(vec_res_idx.val[0], wide_u32_1);
    res_idx_mask.val[1] = wrapper::vand(vec_res_idx.val[1], wide_u32_2);
    res_idx_mask.val[0] = wrapper::vadd(res_idx_mask.val[0], mask_ones);
    res_idx_mask.val[1] = wrapper::vadd(res_idx_mask.val[1], mask_ones);

    uint32_t res = 0xFFFFFFFF;
    uint32_t iter = 0;
    do
    {
        auto pmin = wrapper::vpmin(wrapper::vgethigh(res_idx_mask.val[iter]), wrapper::vgetlow(res_idx_mask.val[iter]));
        pmin = wrapper::vpmin(pmin, pmin);
        res = std::min(wrapper::vgetlane(pmin, 0), res);
        iter++;
    }
    while(iter < 2);

    return (res - 0xFFFFFFFF);
}
#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC

template <class F>
class Reducer
{
public:
    static void reduceX(const Window &window, const ITensor *input, ITensor *output, F f, const ReductionOperation op)
    {
        // Set out window
        Window out_window(window);
        out_window.set(Window::DimX, Window::Dimension(0, 1, 1));

        f(window, out_window, input, output, op);
    }
    static void reduceY(const Window &window, const ITensor *input, ITensor *output, F f, const ReductionOperation op)
    {
        // Set in window
        Window in_window(window);
        Window out_window(window);

        in_window.set(Window::DimY, Window::Dimension(0, 1, 1));
        out_window.set(Window::DimY, Window::Dimension(0, output->info()->dimension(1), output->info()->dimension(1)));

        f(in_window, out_window, input, output, 1, op);
    }
    static void reduceZ(const Window &window, const ITensor *input, ITensor *output, F f, const ReductionOperation op)
    {
        // Set in window
        Window in_window(window);
        Window out_window(window);

        in_window.set(Window::DimZ, Window::Dimension(0, 1, 1));
        out_window.set(Window::DimZ, Window::Dimension(0, output->info()->dimension(2), output->info()->dimension(2)));

        f(in_window, out_window, input, output, 2, op);
    }
    static void reduceW(const Window &window, const ITensor *input, ITensor *output, F f, const ReductionOperation op)
    {
        // Set in/out window
        Window in_window(window);
        Window out_window(window);

        in_window.set(3, Window::Dimension(0, 1, 1));
        out_window.set(3, Window::Dimension(0, 1, 1));

        f(in_window, out_window, input, output, 3, op);
    }
};
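
// Dispatch sketch (illustrative): Reducer only normalizes the in/out windows
// for the chosen axis and forwards to the functor; e.g. a Y-axis float sum is
// launched as
//
//   Reducer<RedOpYZW<float, 4>>::reduceY(window, input, output,
//                                        RedOpYZW<float, 4>(), ReductionOperation::SUM);
//
// which collapses DimY of the input window to a single step before calling
// RedOpYZW<float, 4>::operator() with axis = 1.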

template <typename T, int S>
struct RedOpX
{
    /** SIMD vector tag type. */
    using ExactTagType = typename wrapper::traits::neon_vector<T, S>::tag_type;

    inline void operator()(const Window &in_window, Window &out_window, const ITensor *in, ITensor *out, const ReductionOperation op)
    {
        const size_t input_dim_0 = in->info()->dimension(0);
        const int window_step_x = 16 / sizeof(T);
        const auto window_start_x = static_cast<int>(in_window.x().start());
        const auto window_end_x = static_cast<int>(in_window.x().end());

        Window in_win_no_pad = in_window;
        in_win_no_pad.set(Window::DimX, Window::Dimension(0, 1, 1));

        Iterator input(in, in_win_no_pad);
        Iterator output(out, out_window);

        execute_window_loop(in_win_no_pad, [&](const Coordinates &)
        {
            const auto input_ptr = reinterpret_cast<const T *>(input.ptr());

            auto init_res_value = static_cast<T>(0.f);
            switch(op)
            {
                case ReductionOperation::ARG_IDX_MAX:
                case ReductionOperation::ARG_IDX_MIN:
                case ReductionOperation::MIN:
                case ReductionOperation::MAX:
                {
                    init_res_value = static_cast<T>(*input_ptr);
                    break;
                }
                case ReductionOperation::PROD:
                {
                    init_res_value = static_cast<T>(1.f);
                    break;
                }
                default:
                    break;
            }
            auto vec_res_value = wrapper::vdup_n(init_res_value, ExactTagType{});
            uint32x4x4_t vec_res_idx{ { 0 } };

            // Compute window_step_x elements per iteration
            int x = window_start_x;
            for(; x <= (window_end_x - window_step_x); x += window_step_x)
            {
                const auto vec_elements = wrapper::vloadq(input_ptr + x);
                switch(op)
                {
                    case ReductionOperation::SUM_SQUARE:
                        vec_res_value = wrapper::vadd(wrapper::vmul(vec_elements, vec_elements), vec_res_value);
                        break;
                    case ReductionOperation::MEAN_SUM:
                    case ReductionOperation::SUM:
                        vec_res_value = wrapper::vadd(vec_elements, vec_res_value);
                        break;
                    case ReductionOperation::PROD:
                        vec_res_value = wrapper::vmul(vec_elements, vec_res_value);
                        break;
                    case ReductionOperation::ARG_IDX_MIN:
                    {
                        auto temp_vec_res_value = wrapper::vmin(vec_elements, vec_res_value);
                        vec_res_idx = calculate_index<decltype(vec_res_value)>(x, temp_vec_res_value, vec_res_value, vec_res_idx, op, 0);
                        vec_res_value = temp_vec_res_value;
                        break;
                    }
                    case ReductionOperation::ARG_IDX_MAX:
                    {
                        auto temp_vec_res_value = wrapper::vmax(vec_elements, vec_res_value);
                        vec_res_idx = calculate_index<decltype(vec_res_value)>(x, temp_vec_res_value, vec_res_value, vec_res_idx, op, 0);
                        vec_res_value = temp_vec_res_value;
                        break;
                    }
                    case ReductionOperation::MIN:
                    {
                        vec_res_value = wrapper::vmin(vec_elements, vec_res_value);
                        break;
                    }
                    case ReductionOperation::MAX:
                    {
                        vec_res_value = wrapper::vmax(vec_elements, vec_res_value);
                        break;
                    }
                    default:
                        ARM_COMPUTE_ERROR("Not supported");
                }
            }

            switch(op)
            {
                case ReductionOperation::SUM:
                case ReductionOperation::SUM_SQUARE:
                case ReductionOperation::MEAN_SUM:
                {
#ifdef ARM_COMPUTE_DEBUG_ENABLED
                    auto res = static_cast<T>(0.f);
                    for(int i = 0; i < S; ++i)
                    {
                        res += wrapper::vgetlane(vec_res_value, i);
                    }
#else  // ARM_COMPUTE_DEBUG_ENABLED
                    auto carry_res = wrapper::vpadd(wrapper::vgethigh(vec_res_value), wrapper::vgetlow(vec_res_value));
                    for(int i = 0; i < S / 4; ++i)
                    {
                        carry_res = wrapper::vpadd(carry_res, carry_res);
                    }
                    auto res = wrapper::vgetlane(carry_res, 0);
#endif // ARM_COMPUTE_DEBUG_ENABLED
                    if(op == ReductionOperation::SUM_SQUARE)
                    {
                        // Compute left-over elements
                        for(; x < window_end_x; ++x)
                        {
                            res += (*(input_ptr + x)) * (*(input_ptr + x));
                        }
                    }
                    else
                    {
                        // Compute left-over elements
                        for(; x < window_end_x; ++x)
                        {
                            res += *(input_ptr + x);
                        }
                    }

                    if(op == ReductionOperation::MEAN_SUM)
                    {
                        res /= input_dim_0;
                    }

                    *(reinterpret_cast<T *>(output.ptr())) = res;
                    break;
                }
                case ReductionOperation::PROD:
                {
                    auto carry_res = wrapper::vmul(wrapper::vgethigh(vec_res_value), wrapper::vgetlow(vec_res_value));
                    T res = 1;
                    for(int i = 0; i < S / 2; ++i)
                    {
                        res *= wrapper::vgetlane(carry_res, i);
                    }

                    // Compute left-over elements
                    for(; x < window_end_x; ++x)
                    {
                        res *= *(input_ptr + x);
                    }

                    *(reinterpret_cast<T *>(output.ptr())) = res;
                    break;
                }
                case ReductionOperation::ARG_IDX_MIN:
                {
                    auto idx = calculate_vector_index<decltype(vec_res_value)>(vec_res_idx, vec_res_value, op);
                    auto res = static_cast<T>(wrapper::vgetlane(calculate_min(vec_res_value), 0));

                    // Compute left-over elements
                    for(; x < window_end_x; ++x)
                    {
                        if(*(input_ptr + x) < res)
                        {
                            idx = x;
                            res = *(input_ptr + x);
                        }
                    }
                    *(reinterpret_cast<uint32_t *>(output.ptr())) = idx;
                    break;
                }
                case ReductionOperation::ARG_IDX_MAX:
                {
                    auto idx = calculate_vector_index<decltype(vec_res_value)>(vec_res_idx, vec_res_value, op);
                    auto res = static_cast<T>(wrapper::vgetlane(calculate_max(vec_res_value), 0));

                    // Compute left-over elements
                    for(; x < window_end_x; ++x)
                    {
                        if(*(input_ptr + x) > res)
                        {
                            idx = x;
                            res = *(input_ptr + x);
                        }
                    }
                    *(reinterpret_cast<uint32_t *>(output.ptr())) = idx;
                    break;
                }
                case ReductionOperation::MIN:
                {
                    auto res = static_cast<T>(wrapper::vgetlane(calculate_min(vec_res_value), 0));

                    // Compute left-over elements
                    for(; x < window_end_x; ++x)
                    {
                        res = *(input_ptr + x) < res ? *(input_ptr + x) : res;
                    }
                    *(reinterpret_cast<T *>(output.ptr())) = res;
                    break;
                }
                case ReductionOperation::MAX:
                {
                    auto res = static_cast<T>(wrapper::vgetlane(calculate_max(vec_res_value), 0));

                    // Compute left-over elements
                    for(; x < window_end_x; ++x)
                    {
                        res = *(input_ptr + x) > res ? *(input_ptr + x) : res;
                    }
                    *(reinterpret_cast<T *>(output.ptr())) = res;
                    break;
                }
                default:
                    ARM_COMPUTE_ERROR("Not supported");
            }
        },
        input, output);
    }
};
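
// Numeric sketch (illustrative) of the MEAN_SUM path above for T = float,
// S = 4: reducing the row { 1, 2, 3, 4, 5 } processes one 4-lane vector plus
// one scalar left-over element:
//
//   vpadd collapses { 1, 2, 3, 4 } to 10, the tail loop adds 5 giving 15,
//   and res /= input_dim_0 (here 5) stores 3.f to the output.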

template <typename T>
struct RedOpX_quantized
{
    inline void operator()(const Window &in_window, Window &out_window, const ITensor *in, ITensor *out, const ReductionOperation op)
    {
        using PromotedType = typename wrapper::traits::promote_t<typename wrapper::traits::promote_t<T>>;

        const TensorInfo in_info = *(in->info());
        const UniformQuantizationInfo iq_info = in_info.quantization_info().uniform();

        const int window_step_x = 16 / sizeof(T);
        const auto window_start_x = static_cast<int>(in_window.x().start());
        const auto window_end_x = static_cast<int>(in_window.x().end());

        Window in_win_no_pad = in_window;
        in_win_no_pad.set(Window::DimX, Window::Dimension(0, 1, 1));

        Iterator input(in, in_win_no_pad);
        Iterator output(out, out_window);

        execute_window_loop(in_win_no_pad, [&](const Coordinates &)
        {
            const auto input_ptr = reinterpret_cast<T *>(input.ptr());

            auto vec_res_value1 = wrapper::vdup_n(static_cast<PromotedType>(0.f), wrapper::traits::vector_128_tag{});
            auto vec_res_value2 = wrapper::vdup_n(static_cast<PromotedType>(0.f), wrapper::traits::vector_128_tag{});
            auto vec_res_value3 = wrapper::vdup_n(static_cast<PromotedType>(0.f), wrapper::traits::vector_128_tag{});
            auto vec_res_value4 = wrapper::vdup_n(static_cast<PromotedType>(0.f), wrapper::traits::vector_128_tag{});

            auto vec_res_value1_f = vdupq_n_f32(static_cast<float>(1.f));
            auto vec_res_value2_f = vdupq_n_f32(static_cast<float>(1.f));
            auto vec_res_value3_f = vdupq_n_f32(static_cast<float>(1.f));
            auto vec_res_value4_f = vdupq_n_f32(static_cast<float>(1.f));

            typename wrapper::traits::neon_vector<T, 16>::type vec_res_value = { 0 };

            if(op == ReductionOperation::ARG_IDX_MAX || op == ReductionOperation::ARG_IDX_MIN || op == ReductionOperation::MIN || op == ReductionOperation::MAX)
            {
                vec_res_value = wrapper::vdup_n(*input_ptr, wrapper::traits::vector_128_tag{});
            }

            uint32x4x4_t vec_res_idx{ { 0 } };
            // Compute window_step_x elements per iteration
            int x = window_start_x;
            for(; x <= (window_end_x - window_step_x); x += window_step_x)
            {
                const auto vec_elements = wrapper::vloadq(input_ptr + x);
                switch(op)
                {
                    case ReductionOperation::SUM:
                    case ReductionOperation::MEAN_SUM:
                    {
                        const auto temp16x8t_1 = wrapper::vmovl(wrapper::vgetlow(vec_elements));
                        const auto temp16x8t_2 = wrapper::vmovl(wrapper::vgethigh(vec_elements));

                        const auto temp32x4t_1 = wrapper::vmovl(wrapper::vgetlow(temp16x8t_1));
                        const auto temp32x4t_2 = wrapper::vmovl(wrapper::vgethigh(temp16x8t_1));
                        const auto temp32x4t_3 = wrapper::vmovl(wrapper::vgetlow(temp16x8t_2));
                        const auto temp32x4t_4 = wrapper::vmovl(wrapper::vgethigh(temp16x8t_2));

                        vec_res_value1 = wrapper::vadd(temp32x4t_1, vec_res_value1);
                        vec_res_value2 = wrapper::vadd(temp32x4t_2, vec_res_value2);
                        vec_res_value3 = wrapper::vadd(temp32x4t_3, vec_res_value3);
                        vec_res_value4 = wrapper::vadd(temp32x4t_4, vec_res_value4);
                        break;
                    }
                    case ReductionOperation::PROD:
                    {
                        const auto offset32x4f_4 = vdupq_n_f32(iq_info.offset);
                        const auto scale32x4f_4 = vdupq_n_f32(iq_info.scale);

                        const auto temp16x8t_1 = wrapper::vmovl(wrapper::vgetlow(vec_elements));
                        const auto temp16x8t_2 = wrapper::vmovl(wrapper::vgethigh(vec_elements));

                        const auto temp32x4t_1 = wrapper::vmovl(wrapper::vgetlow(temp16x8t_1));
                        const auto temp32x4t_2 = wrapper::vmovl(wrapper::vgethigh(temp16x8t_1));
                        const auto temp32x4t_3 = wrapper::vmovl(wrapper::vgetlow(temp16x8t_2));
                        const auto temp32x4t_4 = wrapper::vmovl(wrapper::vgethigh(temp16x8t_2));

                        auto temp32x4f_1 = wrapper::vcvt<float>(temp32x4t_1);
                        auto temp32x4f_2 = wrapper::vcvt<float>(temp32x4t_2);
                        auto temp32x4f_3 = wrapper::vcvt<float>(temp32x4t_3);
                        auto temp32x4f_4 = wrapper::vcvt<float>(temp32x4t_4);

                        //de-quantize vec_elements
                        temp32x4f_1 = vmulq_f32(vsubq_f32(temp32x4f_1, offset32x4f_4), scale32x4f_4);
                        temp32x4f_2 = vmulq_f32(vsubq_f32(temp32x4f_2, offset32x4f_4), scale32x4f_4);
                        temp32x4f_3 = vmulq_f32(vsubq_f32(temp32x4f_3, offset32x4f_4), scale32x4f_4);
                        temp32x4f_4 = vmulq_f32(vsubq_f32(temp32x4f_4, offset32x4f_4), scale32x4f_4);

                        vec_res_value1_f = vmulq_f32(temp32x4f_1, vec_res_value1_f);
                        vec_res_value2_f = vmulq_f32(temp32x4f_2, vec_res_value2_f);
                        vec_res_value3_f = vmulq_f32(temp32x4f_3, vec_res_value3_f);
                        vec_res_value4_f = vmulq_f32(temp32x4f_4, vec_res_value4_f);
                        break;
                    }
                    case ReductionOperation::ARG_IDX_MIN:
                    {
                        auto temp_vec_res_value = wrapper::vmin(vec_elements, vec_res_value);
                        vec_res_idx = calculate_index_quantized<decltype(vec_res_value)>(x, temp_vec_res_value, vec_res_value, vec_res_idx, op, 0);
                        vec_res_value = temp_vec_res_value;
                        break;
                    }
                    case ReductionOperation::ARG_IDX_MAX:
                    {
                        auto temp_vec_res_value = wrapper::vmax(vec_elements, vec_res_value);
                        vec_res_idx = calculate_index_quantized<decltype(vec_res_value)>(x, temp_vec_res_value, vec_res_value, vec_res_idx, op, 0);
                        vec_res_value = temp_vec_res_value;
                        break;
                    }
                    case ReductionOperation::MIN:
                    {
                        vec_res_value = wrapper::vmin(vec_elements, vec_res_value);
                        break;
                    }
                    case ReductionOperation::MAX:
                    {
                        vec_res_value = wrapper::vmax(vec_elements, vec_res_value);
                        break;
                    }
                    default:
                        ARM_COMPUTE_ERROR("Not supported");
                }
            }

            switch(op)
            {
                case ReductionOperation::ARG_IDX_MIN:
                {
                    auto idx = calculate_vector_index_quantized<decltype(vec_res_value)>(vec_res_idx, vec_res_value, op);
                    auto res = static_cast<T>(wrapper::vgetlane(calculate_min(vec_res_value), 0));

                    // Compute left-over elements
                    for(; x < window_end_x; ++x)
                    {
                        if(*(input_ptr + x) < res)
                        {
                            idx = x;
                            res = *(input_ptr + x);
                        }
                    }
                    *(reinterpret_cast<uint32_t *>(output.ptr())) = idx;
                    break;
                }
                case ReductionOperation::ARG_IDX_MAX:
                {
                    auto idx = calculate_vector_index_quantized<decltype(vec_res_value)>(vec_res_idx, vec_res_value, op);
                    auto res = static_cast<T>(wrapper::vgetlane(calculate_max(vec_res_value), 0));

                    // Compute left-over elements
                    for(; x < window_end_x; ++x)
                    {
                        if(*(input_ptr + x) > res)
                        {
                            idx = x;
                            res = *(input_ptr + x);
                        }
                    }
                    *(reinterpret_cast<uint32_t *>(output.ptr())) = idx;
                    break;
                }
                case ReductionOperation::MIN:
                {
                    auto res = static_cast<T>(wrapper::vgetlane(calculate_min(vec_res_value), 0));

                    // Compute left-over elements
                    for(; x < window_end_x; ++x)
                    {
                        res = *(input_ptr + x) < res ? *(input_ptr + x) : res;
                    }
                    *(reinterpret_cast<T *>(output.ptr())) = res;
                    break;
                }
                case ReductionOperation::MAX:
                {
                    auto res = static_cast<T>(wrapper::vgetlane(calculate_max(vec_res_value), 0));

                    // Compute left-over elements
                    for(; x < window_end_x; ++x)
                    {
                        res = *(input_ptr + x) > res ? *(input_ptr + x) : res;
                    }
                    *(reinterpret_cast<T *>(output.ptr())) = res;
                    break;
                }
                case ReductionOperation::PROD:
                {
                    auto carry_res = wrapper::vmul(vec_res_value1_f, vec_res_value2_f);
                    carry_res = wrapper::vmul(carry_res, vec_res_value3_f);
                    carry_res = wrapper::vmul(carry_res, vec_res_value4_f);

                    float res = wrapper::vgetlane(carry_res, 0);
                    res *= wrapper::vgetlane(carry_res, 1);
                    res *= wrapper::vgetlane(carry_res, 2);
                    res *= wrapper::vgetlane(carry_res, 3);

                    // Compute left-over elements
                    for(; x < window_end_x; ++x)
                    {
                        //de-quantize input
                        if(std::is_same<T, uint8_t>::value)
                        {
                            res *= dequantize_qasymm8(*(input_ptr + x), iq_info);
                        }
                        else
                        {
                            res *= dequantize_qasymm8_signed(*(input_ptr + x), iq_info);
                        }
                    }

                    //re-quantize result
                    if(std::is_same<T, uint8_t>::value)
                    {
                        res = quantize_qasymm8(res, iq_info);
                    }
                    else
                    {
                        res = quantize_qasymm8_signed(res, iq_info);
                    }

                    *reinterpret_cast<T *>(output.ptr()) = static_cast<T>(res);
                    break;
                }
                case ReductionOperation::SUM:
                case ReductionOperation::MEAN_SUM:
                {
                    auto carry_res = wrapper::vadd(vec_res_value1, vec_res_value2);
                    carry_res = wrapper::vadd(carry_res, vec_res_value3);
                    carry_res = wrapper::vadd(carry_res, vec_res_value4);

                    auto carry_paddition = wrapper::vpadd(wrapper::vgethigh(carry_res), wrapper::vgetlow(carry_res));
                    carry_paddition = wrapper::vpadd(carry_paddition, carry_paddition);
                    auto res = static_cast<int32_t>(wrapper::vgetlane(carry_paddition, 0));

                    // Compute left-over elements
                    for(; x < window_end_x; ++x)
                    {
                        res += *(input_ptr + x);
                    }

                    if(op == ReductionOperation::MEAN_SUM)
                    {
                        res /= static_cast<int32_t>(in_info.dimension(0));
                    }
                    else
                    {
                        // Subtract accumulated offsets
                        res -= (in_info.dimension(0) - 1) * iq_info.offset;
                    }
                    *reinterpret_cast<T *>(output.ptr()) = utils::cast::saturate_cast<T>(res);
                    break;
                }
                default:
                    ARM_COMPUTE_ERROR("Not supported");
            }
        },
        input, output);
    }
};
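
// Offset-correction sketch (illustrative) for the quantized SUM path above:
// a QASYMM8 value x encodes scale * (x - offset), so summing N raw values
// accumulates the offset N times while a valid result must contain it once.
// Subtracting (N - 1) * offset fixes this entirely in the integer domain:
//
//   sum_i(x_i) - (N - 1) * offset == offset + sum_i(x_i - offset)
//   // i.e. the QASYMM8 encoding of scale * sum_i(real_i)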

template <typename T, int S>
struct RedOpYZW
{
    /** SIMD vector tag type. */
    using ExactTagType = typename wrapper::traits::neon_vector<T, S>::tag_type;
    using neon_vector = typename wrapper::traits::neon_vector<T, S>::type;

    inline void operator()(const Window &in_window, Window &out_window, const ITensor *in, ITensor *out, int axis, const ReductionOperation op)
    {
        const TensorInfo in_info = *(in->info());
        const int window_step_x = 16 / sizeof(T);
        const auto window_start_x_tmp = static_cast<int>(in_window.x().start());
        const auto window_end_x_tmp = static_cast<int>(in_window.x().end());
        // As the execution is split over the x-axis, set the correct split window start and end.
        const auto window_start_x = static_cast<int>(0);
        const auto window_end_x = static_cast<int>(in_window.shape().x());

        Window in_win_no_pad = in_window;
        in_win_no_pad.set(Window::DimX, Window::Dimension(window_start_x_tmp, window_end_x_tmp, in_window.shape().x()));
        Window out_win_no_pad = out_window;
        out_win_no_pad.set(Window::DimX, Window::Dimension(window_start_x_tmp, window_end_x_tmp, out_window.shape().x()));

        Iterator input(in, in_win_no_pad);
        Iterator output(out, out_win_no_pad);

        execute_window_loop(in_win_no_pad, [&](const Coordinates &)
        {
            const auto input_ptr = reinterpret_cast<T *>(input.ptr());

            // Compute window_step_x elements per iteration
            int x = window_start_x;
            for(; x <= (window_end_x - window_step_x); x += window_step_x)
            {
                neon_vector vec_res_value = { 0 };
                switch(op)
                {
                    case ReductionOperation::ARG_IDX_MAX:
                    case ReductionOperation::ARG_IDX_MIN:
                    case ReductionOperation::MIN:
                    case ReductionOperation::MAX:
                    {
                        vec_res_value = wrapper::vloadq(input_ptr + x);
                        break;
                    }
                    case ReductionOperation::PROD:
                    {
                        vec_res_value = wrapper::vdup_n(static_cast<T>(1.f), ExactTagType{});
                        break;
                    }
                    default:
                    {
                        vec_res_value = wrapper::vdup_n(static_cast<T>(0.f), ExactTagType{});
                        break;
                    }
                }
                uint32x4x4_t vec_res_idx{ { 0 } };

                for(unsigned int dim = 0; dim < in_info.dimension(axis); ++dim)
                {
                    const T *in_ptr = reinterpret_cast<T *>(input.ptr() + x * sizeof(T) + in_info.strides_in_bytes()[axis] * dim);
                    const auto vec_elements = wrapper::vloadq(in_ptr);
                    switch(op)
                    {
                        case ReductionOperation::SUM:
                        case ReductionOperation::MEAN_SUM:
                            vec_res_value = wrapper::vadd(vec_elements, vec_res_value);
                            break;
                        case ReductionOperation::SUM_SQUARE:
                            vec_res_value = wrapper::vadd(wrapper::vmul(vec_elements, vec_elements), vec_res_value);
                            break;
                        case ReductionOperation::PROD:
                            vec_res_value = wrapper::vmul(vec_elements, vec_res_value);
                            break;
                        case ReductionOperation::ARG_IDX_MIN:
                        {
                            auto temp_vec_res_value = wrapper::vmin(vec_elements, vec_res_value);
                            vec_res_idx = calculate_index(dim, temp_vec_res_value, vec_res_value, vec_res_idx, op, axis);
                            vec_res_value = temp_vec_res_value;
                            break;
                        }
                        case ReductionOperation::ARG_IDX_MAX:
                        {
                            auto temp_vec_res_value = wrapper::vmax(vec_elements, vec_res_value);
                            vec_res_idx = calculate_index(dim, temp_vec_res_value, vec_res_value, vec_res_idx, op, axis);
                            vec_res_value = temp_vec_res_value;
                            break;
                        }
                        case ReductionOperation::MIN:
                        {
                            vec_res_value = wrapper::vmin(vec_elements, vec_res_value);
                            break;
                        }
                        case ReductionOperation::MAX:
                        {
                            vec_res_value = wrapper::vmax(vec_elements, vec_res_value);
                            break;
                        }
                        default:
                            ARM_COMPUTE_ERROR("Not supported");
                    }
                }

                if(op == ReductionOperation::MEAN_SUM)
                {
                    auto vec_width_inv = wrapper::vinv(wrapper::vdup_n(static_cast<T>(in_info.dimension(axis)), ExactTagType{}));
                    vec_res_value = wrapper::vmul(vec_res_value, vec_width_inv);
                }

                if(op == ReductionOperation::ARG_IDX_MIN || op == ReductionOperation::ARG_IDX_MAX)
                {
                    wrapper::vstore(reinterpret_cast<uint32_t *>(output.ptr()) + x, vec_res_idx.val[0]);
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
                    if(std::is_same<T, float16_t>::value)
                    {
                        wrapper::vstore(reinterpret_cast<uint32_t *>(output.ptr()) + x + 4, vec_res_idx.val[1]);
                    }
#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
                }
                else
                {
                    wrapper::vstore(reinterpret_cast<T *>(output.ptr() + x * sizeof(T)), vec_res_value);
                }
            }

            // Compute left-over elements
            for(; x < window_end_x; ++x)
            {
                auto res_value = 0.f;
                switch(op)
                {
                    case ReductionOperation::ARG_IDX_MAX:
                    case ReductionOperation::ARG_IDX_MIN:
                    case ReductionOperation::MIN:
                    case ReductionOperation::MAX:
                    {
                        res_value = *(input_ptr + x);
                        break;
                    }
                    case ReductionOperation::PROD:
                    {
                        res_value = static_cast<T>(1.f);
                        break;
                    }
                    default:
                    {
                        res_value = static_cast<T>(0.f);
                        break;
                    }
                }

                uint32_t res_idx = 0;
                for(unsigned int dim = 0; dim < in_info.dimension(axis); ++dim)
                {
                    const T *in_ptr = reinterpret_cast<T *>(input.ptr() + x * sizeof(T) + in_info.strides_in_bytes()[axis] * dim);

                    switch(op)
                    {
                        case ReductionOperation::SUM:
                        case ReductionOperation::MEAN_SUM:
                            res_value += *in_ptr;
                            break;
                        case ReductionOperation::SUM_SQUARE:
                            res_value += *in_ptr * *in_ptr;
                            break;
                        case ReductionOperation::PROD:
                            res_value *= *in_ptr;
                            break;
                        case ReductionOperation::ARG_IDX_MIN:
                        {
                            if(*in_ptr < res_value)
                            {
                                res_value = *in_ptr;
                                res_idx = dim;
                            }
                            break;
                        }
                        case ReductionOperation::ARG_IDX_MAX:
                        {
                            if(*in_ptr > res_value)
                            {
                                res_value = *in_ptr;
                                res_idx = dim;
                            }
                            break;
                        }
                        case ReductionOperation::MIN:
                        {
                            res_value = *in_ptr < res_value ? *in_ptr : res_value;
                            break;
                        }
                        case ReductionOperation::MAX:
                        {
                            res_value = *in_ptr > res_value ? *in_ptr : res_value;
                            break;
                        }
                        default:
                            ARM_COMPUTE_ERROR("Not supported");
                    }
                }

                if(op == ReductionOperation::MEAN_SUM)
                {
                    res_value /= in_info.dimension(axis);
                }

                if(op == ReductionOperation::ARG_IDX_MIN || op == ReductionOperation::ARG_IDX_MAX)
                {
                    *(reinterpret_cast<uint32_t *>(output.ptr()) + x) = res_idx;
                }
                else
                {
                    *(reinterpret_cast<T *>(output.ptr() + x * sizeof(T))) = res_value;
                }
            }
        },
        input, output);
    }
};

template <typename T, int S, int axis, ReductionOperation op>
struct RedOpYZW_complex
{
    /** SIMD vector tag type. */
    using ExactTagType = typename wrapper::traits::neon_vector<T, S>::tag_type;
    using neon_vector = typename wrapper::traits::neon_vector<T, S>::type;

    inline void operator()(const Window &in_window, Window &out_window, const ITensor *in, ITensor *out, int, const ReductionOperation)
    {
        ARM_COMPUTE_ERROR_ON(axis != 2);
        ARM_COMPUTE_ERROR_ON(op != ReductionOperation::SUM);

        const TensorInfo in_info = *(in->info());
        const size_t stride_z = in_info.strides_in_bytes()[axis];
        const int window_step_x = 16 / sizeof(T);
        const auto window_start_x_tmp = static_cast<int>(in_window.x().start());
        const auto window_end_x_tmp = static_cast<int>(in_window.x().end());
        // As the execution is split over the x-axis, set the correct split window start and end.
        const auto window_start_x = static_cast<int>(0);
        const auto window_end_x = static_cast<int>(in_window.shape().x());

        Window in_win_no_pad = in_window;
        in_win_no_pad.set(Window::DimX, Window::Dimension(window_start_x_tmp, window_end_x_tmp, in_window.shape().x()));
        Window out_win_no_pad = out_window;
        out_win_no_pad.set(Window::DimX, Window::Dimension(window_start_x_tmp, window_end_x_tmp, out_window.shape().x()));

        Iterator input(in, in_win_no_pad);
        Iterator output(out, out_win_no_pad);

        execute_window_loop(in_win_no_pad, [&](const Coordinates &)
        {
            // Compute window_step_x elements per iteration
            int x = window_start_x;
            for(; x <= (window_end_x - window_step_x); x += window_step_x)
            {
                neon_vector vec_res_value_0 = { 0 };
                neon_vector vec_res_value_1 = { 0 };

                vec_res_value_0 = wrapper::vdup_n(static_cast<T>(0.f), ExactTagType{});
                vec_res_value_1 = wrapper::vdup_n(static_cast<T>(0.f), ExactTagType{});

                T *out_ptr = reinterpret_cast<T *>(output.ptr() + 2 * x * sizeof(T));
                for(unsigned int dim = 0; dim < in_info.dimension(axis); ++dim)
                {
                    T *in_ptr_0 = reinterpret_cast<T *>(input.ptr() + 2 * x * sizeof(T) + stride_z * dim);
                    T *in_ptr_1 = reinterpret_cast<T *>(input.ptr() + 2 * x * sizeof(T) + 16 + stride_z * dim);

                    const auto vec_elements_0 = wrapper::vloadq(in_ptr_0);
                    const auto vec_elements_1 = wrapper::vloadq(in_ptr_1);

                    vec_res_value_0 = wrapper::vadd(vec_elements_0, vec_res_value_0);
                    vec_res_value_1 = wrapper::vadd(vec_elements_1, vec_res_value_1);
                }

                wrapper::vstore(out_ptr, vec_res_value_0);
                wrapper::vstore(out_ptr + 4, vec_res_value_1);
            }

            // Compute left-over elements
            for(; x < window_end_x; ++x)
            {
                auto res_value_0 = 0.f;
                auto res_value_1 = 0.f;

                T *out_ptr = reinterpret_cast<T *>(output.ptr() + 2 * x * sizeof(T));
                for(unsigned int dim = 0; dim < in_info.dimension(axis); ++dim)
                {
                    T *in_ptr = reinterpret_cast<T *>(input.ptr() + 2 * x * sizeof(T) + stride_z * dim);
                    res_value_0 += *in_ptr;
                    res_value_1 += *(in_ptr + 1);
                }
                *out_ptr = res_value_0;
                *(out_ptr + 1) = res_value_1;
            }
        },
        input, output);
    }
};
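
// Layout note (illustrative): the complex reducer expects interleaved
// real/imaginary pairs, so element x of the 2-channel F32 tensor starts at
// byte offset 2 * x * sizeof(float). Each iteration of the vectorized loop
// therefore consumes 4 complex numbers (8 floats):
//
//   in_ptr_0 -> { re0, im0, re1, im1 }   // first 16 bytes
//   in_ptr_1 -> { re2, im2, re3, im3 }   // next 16 bytes (offset + 16)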

template <typename T>
struct RedOpYZW_quantized
{
    inline void operator()(const Window &in_window, Window &out_window, const ITensor *in, ITensor *out, int axis, const ReductionOperation op)
    {
        const TensorInfo in_info = *(in->info());
        const UniformQuantizationInfo iq_info = in_info.quantization_info().uniform();
        using PromotedType = typename wrapper::traits::promote_t<typename wrapper::traits::promote_t<T>>;

        const int window_step_x = 16 / sizeof(T);
        const auto window_start_x_tmp = static_cast<int>(in_window.x().start());
        const auto window_end_x_tmp = static_cast<int>(in_window.x().end());
        // As the execution is split over the x-axis, set the correct split window start and end.
        const auto window_start_x = static_cast<int>(0);
        const auto window_end_x = static_cast<int>(in_window.shape().x());

        Window in_win_no_pad = in_window;
        in_win_no_pad.set(Window::DimX, Window::Dimension(window_start_x_tmp, window_end_x_tmp, in_window.shape().x()));
        Window out_win_no_pad = out_window;
        out_win_no_pad.set(Window::DimX, Window::Dimension(window_start_x_tmp, window_end_x_tmp, out_window.shape().x()));

        Iterator input(in, in_win_no_pad);
        Iterator output(out, out_win_no_pad);

        using vector_type = typename wrapper::traits::neon_vector<PromotedType, 4>::type;
        using vector_type_f = typename wrapper::traits::neon_vector<float, 4>::type;

        vector_type vec_res_value1{};
        vector_type vec_res_value2{};
        vector_type vec_res_value3{};
        vector_type vec_res_value4{};

        vector_type_f vec_res_value1_f{};
        vector_type_f vec_res_value2_f{};
        vector_type_f vec_res_value3_f{};
        vector_type_f vec_res_value4_f{};

        execute_window_loop(in_win_no_pad, [&](const Coordinates &)
        {
            const auto input_ptr = reinterpret_cast<T *>(input.ptr());

            // Compute window_step_x elements per iteration
            int x = window_start_x;
            for(; x <= (window_end_x - window_step_x); x += window_step_x)
            {
                uint32x4x4_t vec_res_idx{ { 0 } };
                vec_res_value1 = wrapper::vdup_n(static_cast<PromotedType>(0), wrapper::traits::vector_128_tag{});
                vec_res_value2 = wrapper::vdup_n(static_cast<PromotedType>(0), wrapper::traits::vector_128_tag{});
                vec_res_value3 = wrapper::vdup_n(static_cast<PromotedType>(0), wrapper::traits::vector_128_tag{});
                vec_res_value4 = wrapper::vdup_n(static_cast<PromotedType>(0), wrapper::traits::vector_128_tag{});

                vec_res_value1_f = wrapper::vdup_n(static_cast<float>(1), wrapper::traits::vector_128_tag{});
                vec_res_value2_f = wrapper::vdup_n(static_cast<float>(1), wrapper::traits::vector_128_tag{});
                vec_res_value3_f = wrapper::vdup_n(static_cast<float>(1), wrapper::traits::vector_128_tag{});
                vec_res_value4_f = wrapper::vdup_n(static_cast<float>(1), wrapper::traits::vector_128_tag{});

                auto vec_res_value = wrapper::vloadq(input_ptr + x);

                for(unsigned int index_dim = 0; index_dim < in_info.dimension(axis); ++index_dim)
                {
                    const T *in_ptr = input_ptr + x + in_info.strides_in_bytes()[axis] * index_dim;
                    const auto vec_elements = wrapper::vloadq(in_ptr);
                    switch(op)
                    {
                        case ReductionOperation::SUM:
                        case ReductionOperation::MEAN_SUM:
                        {
                            const auto temp16x8t_1 = wrapper::vmovl(wrapper::vgetlow(vec_elements));
                            const auto temp16x8t_2 = wrapper::vmovl(wrapper::vgethigh(vec_elements));

                            const auto temp32x4t_1 = wrapper::vmovl(wrapper::vgetlow(temp16x8t_1));
                            const auto temp32x4t_2 = wrapper::vmovl(wrapper::vgethigh(temp16x8t_1));
                            const auto temp32x4t_3 = wrapper::vmovl(wrapper::vgetlow(temp16x8t_2));
                            const auto temp32x4t_4 = wrapper::vmovl(wrapper::vgethigh(temp16x8t_2));

                            vec_res_value1 = wrapper::vadd(temp32x4t_1, vec_res_value1);
                            vec_res_value2 = wrapper::vadd(temp32x4t_2, vec_res_value2);
                            vec_res_value3 = wrapper::vadd(temp32x4t_3, vec_res_value3);
                            vec_res_value4 = wrapper::vadd(temp32x4t_4, vec_res_value4);
                            break;
                        }
                        case ReductionOperation::PROD:
                        {
                            const auto offset32x4f_4 = wrapper::vdup_n(static_cast<float>(iq_info.offset), wrapper::traits::vector_128_tag{});
                            const auto scale32x4f_4 = wrapper::vdup_n(iq_info.scale, wrapper::traits::vector_128_tag{});

                            const auto temp16x8t_1 = wrapper::vmovl(wrapper::vgetlow(vec_elements));
                            const auto temp16x8t_2 = wrapper::vmovl(wrapper::vgethigh(vec_elements));

                            const auto temp32x4t_1 = wrapper::vmovl(wrapper::vgetlow(temp16x8t_1));
                            const auto temp32x4t_2 = wrapper::vmovl(wrapper::vgethigh(temp16x8t_1));
                            const auto temp32x4t_3 = wrapper::vmovl(wrapper::vgetlow(temp16x8t_2));
                            const auto temp32x4t_4 = wrapper::vmovl(wrapper::vgethigh(temp16x8t_2));

                            auto temp32x4f_1 = wrapper::vcvt<float>(temp32x4t_1);
                            auto temp32x4f_2 = wrapper::vcvt<float>(temp32x4t_2);
                            auto temp32x4f_3 = wrapper::vcvt<float>(temp32x4t_3);
                            auto temp32x4f_4 = wrapper::vcvt<float>(temp32x4t_4);

                            //de-quantize vec_elements
                            temp32x4f_1 = wrapper::vmul(wrapper::vsub(temp32x4f_1, offset32x4f_4), scale32x4f_4);
                            temp32x4f_2 = wrapper::vmul(wrapper::vsub(temp32x4f_2, offset32x4f_4), scale32x4f_4);
                            temp32x4f_3 = wrapper::vmul(wrapper::vsub(temp32x4f_3, offset32x4f_4), scale32x4f_4);
                            temp32x4f_4 = wrapper::vmul(wrapper::vsub(temp32x4f_4, offset32x4f_4), scale32x4f_4);

                            vec_res_value1_f = wrapper::vmul(temp32x4f_1, vec_res_value1_f);
                            vec_res_value2_f = wrapper::vmul(temp32x4f_2, vec_res_value2_f);
                            vec_res_value3_f = wrapper::vmul(temp32x4f_3, vec_res_value3_f);
                            vec_res_value4_f = wrapper::vmul(temp32x4f_4, vec_res_value4_f);
                            break;
                        }
                        case ReductionOperation::ARG_IDX_MIN:
                        {
                            auto temp_vec_res_value = wrapper::vmin(vec_elements, vec_res_value);
                            vec_res_idx = calculate_index_quantized(index_dim, temp_vec_res_value, vec_res_value, vec_res_idx, op, axis);
                            vec_res_value = temp_vec_res_value;
                            break;
                        }
                        case ReductionOperation::ARG_IDX_MAX:
                        {
                            auto temp_vec_res_value = wrapper::vmax(vec_elements, vec_res_value);
                            vec_res_idx = calculate_index_quantized(index_dim, temp_vec_res_value, vec_res_value, vec_res_idx, op, axis);
                            vec_res_value = temp_vec_res_value;
                            break;
                        }
                        case ReductionOperation::MIN:
                        {
                            vec_res_value = wrapper::vmin(vec_elements, vec_res_value);
                            break;
                        }
                        case ReductionOperation::MAX:
                        {
                            vec_res_value = wrapper::vmax(vec_elements, vec_res_value);
                            break;
                        }
                        default:
                            ARM_COMPUTE_ERROR("Not supported");
                    }
                }

                switch(op)
                {
                    case ReductionOperation::ARG_IDX_MIN:
                    case ReductionOperation::ARG_IDX_MAX:
                    {
                        wrapper::vstore(reinterpret_cast<uint32_t *>(output.ptr() + 4 * x), vec_res_idx.val[0]);
                        wrapper::vstore(reinterpret_cast<uint32_t *>(output.ptr() + 4 * x) + 4, vec_res_idx.val[1]);
                        wrapper::vstore(reinterpret_cast<uint32_t *>(output.ptr() + 4 * x) + 8, vec_res_idx.val[2]);
                        wrapper::vstore(reinterpret_cast<uint32_t *>(output.ptr() + 4 * x) + 12, vec_res_idx.val[3]);
                        break;
                    }
                    case ReductionOperation::MIN:
                    case ReductionOperation::MAX:
                    {
                        wrapper::vstore(reinterpret_cast<T *>(output.ptr() + x), vec_res_value);
                        break;
                    }
                    case ReductionOperation::SUM:
                    {
                        // Subtract offsets
                        auto offsets = vdupq_n_s32((in_info.dimension(axis) - 1) * iq_info.offset);

                        auto vec_res_s_value1 = wrapper::vreinterpret(vec_res_value1);
                        auto vec_res_s_value2 = wrapper::vreinterpret(vec_res_value2);
                        auto vec_res_s_value3 = wrapper::vreinterpret(vec_res_value3);
                        auto vec_res_s_value4 = wrapper::vreinterpret(vec_res_value4);

                        vec_res_s_value1 = wrapper::vsub(vec_res_s_value1, offsets);
                        vec_res_s_value2 = wrapper::vsub(vec_res_s_value2, offsets);
                        vec_res_s_value3 = wrapper::vsub(vec_res_s_value3, offsets);
                        vec_res_s_value4 = wrapper::vsub(vec_res_s_value4, offsets);

                        const auto temp16x8t_1 = wrapper::vcombine(wrapper::vqmovn(vec_res_s_value1), wrapper::vqmovn(vec_res_s_value2));
                        const auto temp16x8t_2 = wrapper::vcombine(wrapper::vqmovn(vec_res_s_value3), wrapper::vqmovn(vec_res_s_value4));

                        combine_and_store<T>(temp16x8t_1, temp16x8t_2, output, x);
                        break;
                    }
                    case ReductionOperation::MEAN_SUM:
                    {
                        const auto vec_width_inv = wrapper::vinv(wrapper::vdup_n(static_cast<float>(in_info.dimension(axis)), wrapper::traits::vector_128_tag{}));
                        vec_res_value1_f = wrapper::vmul(wrapper::vcvt<float>(vec_res_value1), vec_width_inv);
                        vec_res_value2_f = wrapper::vmul(wrapper::vcvt<float>(vec_res_value2), vec_width_inv);
                        vec_res_value3_f = wrapper::vmul(wrapper::vcvt<float>(vec_res_value3), vec_width_inv);
                        vec_res_value4_f = wrapper::vmul(wrapper::vcvt<float>(vec_res_value4), vec_width_inv);

                        vec_res_value1 = wrapper::vcvt<T>(vec_res_value1_f);
                        vec_res_value2 = wrapper::vcvt<T>(vec_res_value2_f);
                        vec_res_value3 = wrapper::vcvt<T>(vec_res_value3_f);
                        vec_res_value4 = wrapper::vcvt<T>(vec_res_value4_f);

                        const auto temp16x8t_1 = wrapper::vcombine(wrapper::vqmovn(vec_res_value1), wrapper::vqmovn(vec_res_value2));
                        const auto temp16x8t_2 = wrapper::vcombine(wrapper::vqmovn(vec_res_value3), wrapper::vqmovn(vec_res_value4));
                        auto res = wrapper::vcombine(wrapper::vqmovn(temp16x8t_1), wrapper::vqmovn(temp16x8t_2));

                        wrapper::vstore(reinterpret_cast<T *>(output.ptr() + x), res);
                        break;
                    }
                    case ReductionOperation::PROD:
                    {
                        const auto offset32x4f_4 = wrapper::vdup_n(static_cast<float>(iq_info.offset), wrapper::traits::vector_128_tag{});
                        const auto iscale32x4f_4 = vinvq_f32(vdupq_n_f32(iq_info.scale));

                        //re-quantize
                        vec_res_value1_f = wrapper::vadd(wrapper::vmul(vec_res_value1_f, iscale32x4f_4), offset32x4f_4);
                        vec_res_value2_f = wrapper::vadd(wrapper::vmul(vec_res_value2_f, iscale32x4f_4), offset32x4f_4);
                        vec_res_value3_f = wrapper::vadd(wrapper::vmul(vec_res_value3_f, iscale32x4f_4), offset32x4f_4);
                        vec_res_value4_f = wrapper::vadd(wrapper::vmul(vec_res_value4_f, iscale32x4f_4), offset32x4f_4);

                        vec_res_value1 = wrapper::vcvt<T>(vec_res_value1_f);
                        vec_res_value2 = wrapper::vcvt<T>(vec_res_value2_f);
                        vec_res_value3 = wrapper::vcvt<T>(vec_res_value3_f);
                        vec_res_value4 = wrapper::vcvt<T>(vec_res_value4_f);

                        const auto temp16x8t_1 = wrapper::vcombine(wrapper::vqmovn(vec_res_value1), wrapper::vqmovn(vec_res_value2));
                        const auto temp16x8t_2 = wrapper::vcombine(wrapper::vqmovn(vec_res_value3), wrapper::vqmovn(vec_res_value4));
                        auto res = wrapper::vcombine(wrapper::vqmovn(temp16x8t_1), wrapper::vqmovn(temp16x8t_2));

                        wrapper::vstore(reinterpret_cast<T *>(output.ptr() + x), res);
                        break;
                    }
                    default:
                        ARM_COMPUTE_ERROR("Not supported");
                }
            }

            // Compute left-over elements
            for(; x < window_end_x; ++x)
            {
                float res_value = 0.f;
                switch(op)
                {
                    case ReductionOperation::ARG_IDX_MAX:
                    case ReductionOperation::ARG_IDX_MIN:
                    case ReductionOperation::MIN:
                    case ReductionOperation::MAX:
                    {
                        res_value = *(input_ptr + x);
                        break;
                    }
                    case ReductionOperation::PROD:
                    {
                        res_value = static_cast<T>(1.0f);
                        break;
                    }
                    default:
                    {
                        res_value = static_cast<T>(0.0f);
                        break;
                    }
                }
                uint32_t res_idx = 0;

                for(unsigned int dim = 0; dim < in_info.dimension(axis); ++dim)
                {
                    const T *in_ptr = reinterpret_cast<T *>(input.ptr() + x + in_info.strides_in_bytes()[axis] * dim);
                    switch(op)
                    {
                        case ReductionOperation::SUM:
                        case ReductionOperation::MEAN_SUM:
                        {
                            res_value += *in_ptr;
                            break;
                        }
                        case ReductionOperation::SUM_SQUARE:
                        {
                            res_value += *in_ptr * *in_ptr;
                            break;
                        }
                        case ReductionOperation::PROD:
                        {
                            //de-quantize input
                            if(std::is_same<T, uint8_t>::value)
                            {
                                res_value *= dequantize_qasymm8(*in_ptr, iq_info);
                            }
                            else
                            {
                                res_value *= dequantize_qasymm8_signed(*in_ptr, iq_info);
                            }
                            break;
                        }
                        case ReductionOperation::ARG_IDX_MIN:
                        {
                            if(*in_ptr < res_value)
                            {
                                res_value = *in_ptr;
                                res_idx = dim;
                            }
                            break;
                        }
                        case ReductionOperation::ARG_IDX_MAX:
                        {
                            if(*in_ptr > res_value)
                            {
                                res_value = *in_ptr;
                                res_idx = dim;
                            }
                            break;
                        }
                        case ReductionOperation::MIN:
                        {
                            res_value = *in_ptr < res_value ? *in_ptr : res_value;
                            break;
                        }
                        case ReductionOperation::MAX:
                        {
                            res_value = *in_ptr > res_value ? *in_ptr : res_value;
                            break;
                        }
                        default:
                            ARM_COMPUTE_ERROR("Not supported");
                    }
                }

                switch(op)
                {
                    case ReductionOperation::MEAN_SUM:
                    {
                        int32_t res = static_cast<int32_t>(res_value);
                        res /= static_cast<int32_t>(in_info.dimension(axis));
                        *reinterpret_cast<T *>(output.ptr() + x) = utils::cast::saturate_cast<T>(res);
                        break;
                    }
                    case ReductionOperation::SUM:
                    {
                        // Subtract accumulated offsets
                        res_value -= (in_info.dimension(axis) - 1) * iq_info.offset;
                        *reinterpret_cast<T *>(output.ptr() + x) = utils::cast::saturate_cast<T>(res_value);
                        break;
                    }
                    case ReductionOperation::PROD:
                    {
                        //re-quantize result
                        T res = 0;
                        if(std::is_same<T, uint8_t>::value)
                        {
                            res = quantize_qasymm8(res_value, iq_info);
                        }
                        else
                        {
                            res = quantize_qasymm8_signed(res_value, iq_info);
                        }
                        *(reinterpret_cast<T *>(output.ptr() + x)) = res;
                        break;
                    }
                    case ReductionOperation::ARG_IDX_MIN:
                    case ReductionOperation::ARG_IDX_MAX:
                    {
                        *(reinterpret_cast<uint32_t *>(output.ptr() + x * 4)) = res_idx;
                        break;
                    }
                    default:
                        *(reinterpret_cast<T *>(output.ptr() + x)) = res_value;
                }
            }
        },
        input, output);
    }
};

void reduce_op(const Window &window, const ITensor *input, ITensor *output, unsigned int axis, const ReductionOperation op)
{
    const bool is_complex = (input->info()->num_channels() == 2);

    if(is_complex)
    {
        switch(axis)
        {
            case 2:
                switch(input->info()->data_type())
                {
                    case DataType::F32:
                        switch(op)
                        {
                            case ReductionOperation::SUM:
                                return Reducer<RedOpYZW_complex<float, 4, 2, ReductionOperation::SUM>>::reduceZ(window, input, output, RedOpYZW_complex<float, 4, 2, ReductionOperation::SUM>(), op);
                            default:
                                ARM_COMPUTE_ERROR("Not supported");
                        }
                    default:
                        ARM_COMPUTE_ERROR("Not supported");
                }
            default:
                ARM_COMPUTE_ERROR("Not supported");
        }
        return;
    }

    switch(axis)
    {
        case 0:
            switch(input->info()->data_type())
            {
                case DataType::QASYMM8:
                    return Reducer<RedOpX_quantized<uint8_t>>::reduceX(window, input, output, RedOpX_quantized<uint8_t>(), op);
                case DataType::QASYMM8_SIGNED:
                    return Reducer<RedOpX_quantized<int8_t>>::reduceX(window, input, output, RedOpX_quantized<int8_t>(), op);
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
                case DataType::F16:
                    return Reducer<RedOpX<float16_t, 8>>::reduceX(window, input, output, RedOpX<float16_t, 8>(), op);
#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
                case DataType::F32:
                    return Reducer<RedOpX<float, 4>>::reduceX(window, input, output, RedOpX<float, 4>(), op);
                case DataType::S32:
                    return Reducer<RedOpX<int32_t, 4>>::reduceX(window, input, output, RedOpX<int32_t, 4>(), op);
                default:
                    ARM_COMPUTE_ERROR("Not supported");
            }
        case 1:
            switch(input->info()->data_type())
            {
                case DataType::QASYMM8:
                    return Reducer<RedOpYZW_quantized<uint8_t>>::reduceY(window, input, output, RedOpYZW_quantized<uint8_t>(), op);
                case DataType::QASYMM8_SIGNED:
                    return Reducer<RedOpYZW_quantized<int8_t>>::reduceY(window, input, output, RedOpYZW_quantized<int8_t>(), op);
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
                case DataType::F16:
                    return Reducer<RedOpYZW<float16_t, 8>>::reduceY(window, input, output, RedOpYZW<float16_t, 8>(), op);
#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
                case DataType::F32:
                    return Reducer<RedOpYZW<float, 4>>::reduceY(window, input, output, RedOpYZW<float, 4>(), op);
                case DataType::S32:
                    return Reducer<RedOpYZW<int32_t, 4>>::reduceY(window, input, output, RedOpYZW<int32_t, 4>(), op);
                default:
                    ARM_COMPUTE_ERROR("Not supported");
            }
        case 2:
            switch(input->info()->data_type())
            {
                case DataType::QASYMM8:
                    return Reducer<RedOpYZW_quantized<uint8_t>>::reduceZ(window, input, output, RedOpYZW_quantized<uint8_t>(), op);
                case DataType::QASYMM8_SIGNED:
                    return Reducer<RedOpYZW_quantized<int8_t>>::reduceZ(window, input, output, RedOpYZW_quantized<int8_t>(), op);
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
                case DataType::F16:
                    return Reducer<RedOpYZW<float16_t, 8>>::reduceZ(window, input, output, RedOpYZW<float16_t, 8>(), op);
#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
                case DataType::F32:
                    return Reducer<RedOpYZW<float, 4>>::reduceZ(window, input, output, RedOpYZW<float, 4>(), op);
                case DataType::S32:
                    return Reducer<RedOpYZW<int32_t, 4>>::reduceZ(window, input, output, RedOpYZW<int32_t, 4>(), op);
                default:
                    ARM_COMPUTE_ERROR("Not supported");
            }
        case 3:
            switch(input->info()->data_type())
            {
                case DataType::QASYMM8:
                    return Reducer<RedOpYZW_quantized<uint8_t>>::reduceW(window, input, output, RedOpYZW_quantized<uint8_t>(), op);
                case DataType::QASYMM8_SIGNED:
                    return Reducer<RedOpYZW_quantized<int8_t>>::reduceW(window, input, output, RedOpYZW_quantized<int8_t>(), op);
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
                case DataType::F16:
                    return Reducer<RedOpYZW<float16_t, 8>>::reduceW(window, input, output, RedOpYZW<float16_t, 8>(), op);
#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
                case DataType::F32:
                    return Reducer<RedOpYZW<float, 4>>::reduceW(window, input, output, RedOpYZW<float, 4>(), op);
                case DataType::S32:
                    return Reducer<RedOpYZW<int32_t, 4>>::reduceW(window, input, output, RedOpYZW<int32_t, 4>(), op);
                default:
                    ARM_COMPUTE_ERROR("Not supported");
            }
        default:
            ARM_COMPUTE_ERROR("Unsupported reduction axis");
    }
}
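
// Dispatch example (illustrative): reducing axis 1 of a QASYMM8 tensor with
// MEAN_SUM resolves, through the switches above, to
//
//   Reducer<RedOpYZW_quantized<uint8_t>>::reduceY(window, input, output,
//                                                 RedOpYZW_quantized<uint8_t>(),
//                                                 ReductionOperation::MEAN_SUM);
//
// Any (axis, data type) pair not listed falls through to ARM_COMPUTE_ERROR.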

Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, unsigned int axis, ReductionOperation op)
{
    ARM_COMPUTE_UNUSED(op);

    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output);
    ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(input);

    if(input->num_channels() == 1)
    {
        ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8_SIGNED, DataType::QASYMM8, DataType::S32, DataType::F16, DataType::F32);
    }
    else
    {
        ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 2, DataType::F32);
        ARM_COMPUTE_RETURN_ERROR_ON(op != ReductionOperation::SUM);
        ARM_COMPUTE_RETURN_ERROR_ON(axis != 2);
    }

    ARM_COMPUTE_RETURN_ERROR_ON_MSG(axis >= TensorShape::num_max_dimensions, "Reduction axis greater than max number of dimensions");
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(axis > 3, "Unsupported reduction axis");

    if(output->total_size() != 0)
    {
        bool is_arg_min_max = (op == ReductionOperation::ARG_IDX_MAX || op == ReductionOperation::ARG_IDX_MIN);
        if(!is_arg_min_max)
        {
            ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
            ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_QUANTIZATION_INFO(input, output);
            ARM_COMPUTE_RETURN_ERROR_ON(input->num_channels() != output->num_channels());
        }
        else
        {
            ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_NOT_IN(output, DataType::U32, DataType::S32);
        }

        const TensorShape output_shape = arm_compute::misc::shape_calculator::compute_reduced_shape(input->tensor_shape(), axis);
        const TensorInfo tensor_info_reshaped = input->clone()->set_tensor_shape(output_shape);
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(output, &tensor_info_reshaped);
    }

    return Status{};
}
} // namespace
1674 
1676  : _input(nullptr), _output(nullptr), _reduction_axis(0), _op(ReductionOperation::SUM_SQUARE)
1677 {
1678 }
1679 
1680 void NEReductionOperationKernel::configure(const ITensor *input, ITensor *output, unsigned int axis, ReductionOperation op)
1681 {
1682  ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
1683 
1684  ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info(), axis, op));
1685 
1686  _input = input;
1687  _output = output;
1688  _op = op;
1689  _reduction_axis = axis;
1690 
1691  // Configure kernel window
1692  Window win = calculate_max_window(*input->info(), Steps());
1693  INEKernel::configure(win);
1694 
1695  // Calculate output shape and set if empty
 1696  const TensorShape output_shape = arm_compute::misc::shape_calculator::compute_reduced_shape(input->info()->tensor_shape(), axis);
 1697  // Output auto initialization if not yet initialized
1698  const bool is_arg_min_max = (op == ReductionOperation::ARG_IDX_MIN || op == ReductionOperation::ARG_IDX_MAX);
1699  DataType output_data_type = is_arg_min_max ? DataType::S32 : input->info()->data_type();
1700  auto_init_if_empty(*output->info(), input->info()->clone()->set_tensor_shape(output_shape).set_data_type(output_data_type).reset_padding().set_is_resizable(true));
1701 }
1702 
1703 Status NEReductionOperationKernel::validate(const ITensorInfo *input, const ITensorInfo *output, unsigned int axis, ReductionOperation op)
1704 {
1705  ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output, axis, op));
1706 
1707  return Status{};
1708 }
1709 
 1710 void NEReductionOperationKernel::run(const Window &window, const ThreadInfo &info)
 1711 {
1712  ARM_COMPUTE_UNUSED(info);
 1713  ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
 1714  ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(INEKernel::window(), window);
 1715 
1716  reduce_op(window, _input, _output, _reduction_axis, _op);
1717 }
1718 } // namespace arm_compute