Compute Library 21.02
NEAccumulateKernel.cpp
/*
 * Copyright (c) 2016-2020 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "src/core/NEON/kernels/NEAccumulateKernel.h"

#include "arm_compute/core/Error.h"
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/ITensor.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/core/Validate.h"
#include "src/core/helpers/AutoConfiguration.h"

#include <arm_neon.h>

namespace arm_compute
{
/* Max S16 value used for saturation purposes. */
const static uint16x8_t max_int_u16 = vdupq_n_u16(static_cast<uint16_t>(INT16_MAX));

#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
namespace fp16
{
inline float16x8x2_t convert_u8x16_to_f16x8x2(uint8x16_t input)
{
    const float16x8x2_t out =
    {
        {
            vcvtq_f16_u16(vmovl_u8(vget_low_u8(input))),
            vcvtq_f16_u16(vmovl_u8(vget_high_u8(input)))
        }
    };

    return out;
}

inline uint8x16_t convert_f16x8x2_to_u8x16(const float16x8x2_t &input)
{
    return vcombine_u8(vmovn_u16(vcvtq_u16_f16(input.val[0])),
                       vmovn_u16(vcvtq_u16_f16(input.val[1])));
}

inline float16x8x2_t vector_accumulate_weighted(const float16x8x2_t &vec0, const float16x8x2_t &vec1, float16x8_t scale_val, float16x8_t scale_val2)
{
    const float16x8x2_t res =
    {
        {
            vfmaq_f16(vmulq_f16(vec1.val[0], scale_val), vec0.val[0], scale_val2),
            vfmaq_f16(vmulq_f16(vec1.val[1], scale_val), vec0.val[1], scale_val2)
        }
    };

    return res;
}
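
// With scale_val = 1 - alpha and scale_val2 = alpha (set up in NEAccumulateWeightedFP16Kernel::run()
// below), each lane of res is vec1 * (1 - alpha) + vec0 * alpha, i.e. the weighted accumulation
// accum_new = (1 - alpha) * accum + alpha * input, computed with a fused multiply-add (vfmaq_f16).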

void acc_we_v16_u8(const void *__restrict input, void *__restrict accum, float16x8_t scale_val, float16x8_t scale_val2)
{
    ARM_COMPUTE_ERROR_ON(nullptr == input);
    ARM_COMPUTE_ERROR_ON(nullptr == accum);

    const auto input_ptr = static_cast<const uint8_t *__restrict>(input);
    const auto accum_ptr = static_cast<uint8_t *__restrict>(accum);

    const uint8x16x4_t input_buffer = vld4q_u8(input_ptr);
    uint8x16x4_t       accum_buffer = vld4q_u8(accum_ptr);

    const float16x8x2_t f16_input_0 = convert_u8x16_to_f16x8x2(input_buffer.val[0]);
    const float16x8x2_t f16_input_1 = convert_u8x16_to_f16x8x2(input_buffer.val[1]);
    const float16x8x2_t f16_input_2 = convert_u8x16_to_f16x8x2(input_buffer.val[2]);
    const float16x8x2_t f16_input_3 = convert_u8x16_to_f16x8x2(input_buffer.val[3]);

    float16x8x2_t f16_accum_0 = convert_u8x16_to_f16x8x2(accum_buffer.val[0]);
    float16x8x2_t f16_accum_1 = convert_u8x16_to_f16x8x2(accum_buffer.val[1]);
    float16x8x2_t f16_accum_2 = convert_u8x16_to_f16x8x2(accum_buffer.val[2]);
    float16x8x2_t f16_accum_3 = convert_u8x16_to_f16x8x2(accum_buffer.val[3]);

    f16_accum_0 = vector_accumulate_weighted(f16_input_0, f16_accum_0, scale_val, scale_val2);
    f16_accum_1 = vector_accumulate_weighted(f16_input_1, f16_accum_1, scale_val, scale_val2);
    f16_accum_2 = vector_accumulate_weighted(f16_input_2, f16_accum_2, scale_val, scale_val2);
    f16_accum_3 = vector_accumulate_weighted(f16_input_3, f16_accum_3, scale_val, scale_val2);

    accum_buffer = { {
                         convert_f16x8x2_to_u8x16(f16_accum_0),
                         convert_f16x8x2_to_u8x16(f16_accum_1),
                         convert_f16x8x2_to_u8x16(f16_accum_2),
                         convert_f16x8x2_to_u8x16(f16_accum_3)
                     }
                   };

    vst4q_u8(accum_ptr, accum_buffer);
}
} // namespace fp16

void NEAccumulateWeightedFP16Kernel::run(const Window &window, const ThreadInfo &info)
{
    ARM_COMPUTE_UNUSED(info);
    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(INEKernel::window(), window);

    Iterator input(_input, window);
    Iterator accum(_output, window);

    const float16x8_t scale_val  = vdupq_n_f16(1.f - _alpha);
    const float16x8_t scale_val2 = vdupq_n_f16(_alpha);

    execute_window_loop(window, [&](const Coordinates &)
    {
        fp16::acc_we_v16_u8(input.ptr(), accum.ptr(), scale_val, scale_val2);
    },
    input, accum);
}
#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */

namespace
{
inline void acc_v16_u8(const void *__restrict input, void *__restrict accum)
{
    ARM_COMPUTE_ERROR_ON(nullptr == input);
    ARM_COMPUTE_ERROR_ON(nullptr == accum);

    const auto in  = static_cast<const uint8_t *__restrict>(input);
    const auto out = static_cast<int16_t *__restrict>(accum);

    uint8x16_t ta1 = vld1q_u8(in);
    int16x8_t  ta2 = vld1q_s16(out);
    int16x8_t  ta3 = vld1q_s16(out + 8);

    ta2 = vqaddq_s16(ta2, vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(ta1))));
    ta3 = vqaddq_s16(ta3, vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(ta1))));

    vst1q_s16(out, ta2);
    vst1q_s16(out + 8, ta3);
}
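
// acc_v16_u8 handles 16 pixels per call: each U8 input value is widened to 16 bits and added to the
// S16 accumulator with signed saturation (vqaddq_s16), i.e. accum[i] = saturate_s16(accum[i] + input[i]).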

inline float32x4x4_t convert_u8x16_to_f32x4x4(uint8x16_t input)
{
    const uint16x8_t u16_output_low = vmovl_u8(vget_low_u8(input));
    const uint16x8_t u16_output_hi  = vmovl_u8(vget_high_u8(input));

    const float32x4x4_t res =
    {
        {
            vcvtq_f32_u32(vmovl_u16(vget_low_u16(u16_output_low))),
            vcvtq_f32_u32(vmovl_u16(vget_high_u16(u16_output_low))),
            vcvtq_f32_u32(vmovl_u16(vget_low_u16(u16_output_hi))),
            vcvtq_f32_u32(vmovl_u16(vget_high_u16(u16_output_hi)))
        }
    };

    return res;
}

inline uint8x16_t convert_f32x4x4_to_u8x16(const float32x4x4_t &input)
{
    return vcombine_u8(vmovn_u16(vcombine_u16(vmovn_u32(vcvtq_u32_f32(input.val[0])),
                                              vmovn_u32(vcvtq_u32_f32(input.val[1])))),
                       vmovn_u16(vcombine_u16(vmovn_u32(vcvtq_u32_f32(input.val[2])),
                                              vmovn_u32(vcvtq_u32_f32(input.val[3])))));
}

inline float32x4x4_t vector_accumulate_weighted(const float32x4x4_t &vector_input, float32x4x4_t vector_output, float32x4_t scale_val, float32x4_t scale_val2)
{
    vector_output.val[0] = vmulq_f32(vector_output.val[0], scale_val);
    vector_output.val[1] = vmulq_f32(vector_output.val[1], scale_val);
    vector_output.val[2] = vmulq_f32(vector_output.val[2], scale_val);
    vector_output.val[3] = vmulq_f32(vector_output.val[3], scale_val);

    vector_output.val[0] = vmlaq_f32(vector_output.val[0], vector_input.val[0], scale_val2);
    vector_output.val[1] = vmlaq_f32(vector_output.val[1], vector_input.val[1], scale_val2);
    vector_output.val[2] = vmlaq_f32(vector_output.val[2], vector_input.val[2], scale_val2);
    vector_output.val[3] = vmlaq_f32(vector_output.val[3], vector_input.val[3], scale_val2);

    return vector_output;
}
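
// Same weighted-accumulation update as the FP16 path above, here in two steps (vmulq_f32 then
// vmlaq_f32): vector_output = vector_output * (1 - alpha) + vector_input * alpha.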

inline void acc_we_v16_u8(const void *__restrict input, void *__restrict accum, const float32x4_t scale_val, const float32x4_t scale_val2)
{
    ARM_COMPUTE_ERROR_ON(nullptr == input);
    ARM_COMPUTE_ERROR_ON(nullptr == accum);

    const auto input_ptr = static_cast<const uint8_t *__restrict>(input);
    const auto accum_ptr = static_cast<uint8_t *__restrict>(accum);

    const uint8x16_t input_buffer = vld1q_u8(input_ptr);
    const uint8x16_t accum_buffer = vld1q_u8(accum_ptr);

    const float32x4x4_t f32_input_0  = convert_u8x16_to_f32x4x4(input_buffer);
    const float32x4x4_t f32_output_0 = convert_u8x16_to_f32x4x4(accum_buffer);

    const float32x4x4_t f32_res_0 = vector_accumulate_weighted(f32_input_0, f32_output_0, scale_val, scale_val2);

    vst1q_u8(accum_ptr, convert_f32x4x4_to_u8x16(f32_res_0));
}

void acc_sq_v16_u8(const void *__restrict input, uint32_t shift, void *__restrict accum)
{
    ARM_COMPUTE_ERROR_ON(nullptr == input);
    ARM_COMPUTE_ERROR_ON(nullptr == accum);
    ARM_COMPUTE_ERROR_ON(shift > 15);

    const auto input_buffer = static_cast<const uint8_t *__restrict>(input);
    const auto accum_buffer = static_cast<int16_t *__restrict>(accum);

    const uint8x16_t ta1 = vld1q_u8(input_buffer);
    uint16x8_t       ta2 = vreinterpretq_u16_s16(vld1q_s16(accum_buffer));
    uint16x8_t       ta3 = vreinterpretq_u16_s16(vld1q_s16(accum_buffer + 8));

    const int16x8_t vector_shift = vdupq_n_s16(-static_cast<int16_t>(shift));

    uint16x8_t linput = vmovl_u8(vget_low_u8(ta1));
    uint16x8_t hinput = vmovl_u8(vget_high_u8(ta1));

    linput = vmulq_u16(linput, linput);
    hinput = vmulq_u16(hinput, hinput);

    linput = vqshlq_u16(linput, vector_shift);
    hinput = vqshlq_u16(hinput, vector_shift);

    ta2 = vqaddq_u16(ta2, linput);
    ta3 = vqaddq_u16(ta3, hinput);

    vst1q_s16(accum_buffer, vreinterpretq_s16_u16(vminq_u16(max_int_u16, ta2)));
    vst1q_s16(accum_buffer + 8, vreinterpretq_s16_u16(vminq_u16(max_int_u16, ta3)));
}
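
// acc_sq_v16_u8 handles 16 pixels per call. The square of a U8 value fits in 16 bits, the negative
// count passed to vqshlq_u16 performs the right shift by 'shift', and the final vminq_u16 against
// INT16_MAX keeps the stored S16 accumulator in the non-negative range:
//     accum[i] = min(INT16_MAX, saturate_u16(accum[i] + ((input[i] * input[i]) >> shift)))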
} // namespace

void NEAccumulateKernel::configure(const ITensor *input, ITensor *accum)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(input, accum);

    set_shape_if_empty(*accum->info(), input->info()->tensor_shape());

    set_format_if_unknown(*accum->info(), Format::S16);

    ARM_COMPUTE_ERROR_ON_MISMATCHING_SHAPES(input, accum);
    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U8);
    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(accum, 1, DataType::S16);

    constexpr unsigned int num_elems_processed_per_iteration = 16;
    INESimpleKernel::configure(input, accum, num_elems_processed_per_iteration);
}

void NEAccumulateKernel::run(const Window &window, const ThreadInfo &info)
{
    ARM_COMPUTE_UNUSED(info);
    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(INEKernel::window(), window);
    Iterator input(_input, window);
    Iterator accum(_output, window);

    execute_window_loop(window, [&](const Coordinates &)
    {
        acc_v16_u8(input.ptr(), accum.ptr());
    },
    input, accum);
}

NEAccumulateWeightedKernel::NEAccumulateWeightedKernel()
    : _alpha(0.0f)
{
}

void NEAccumulateWeightedKernel::configure(const ITensor *input, float alpha, ITensor *accum)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(input, accum);

    set_shape_if_empty(*accum->info(), input->info()->tensor_shape());

    set_format_if_unknown(*accum->info(), Format::U8);

    ARM_COMPUTE_ERROR_ON_MISMATCHING_SHAPES(input, accum);
    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U8);
    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(accum, 1, DataType::U8);
    ARM_COMPUTE_ERROR_ON(alpha < 0.0 || alpha > 1.0);

    _alpha = alpha;

    constexpr unsigned int num_elems_processed_per_iteration = 16;
    INESimpleKernel::configure(input, accum, num_elems_processed_per_iteration);
}

void NEAccumulateWeightedKernel::run(const Window &window, const ThreadInfo &info)
{
    ARM_COMPUTE_UNUSED(info);
    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(INEKernel::window(), window);

    Iterator input(_input, window);
    Iterator accum(_output, window);

    const float32x4_t scale_val  = vdupq_n_f32(1.f - _alpha);
    const float32x4_t scale_val2 = vdupq_n_f32(_alpha);

    execute_window_loop(window, [&](const Coordinates &)
    {
        acc_we_v16_u8(input.ptr(), accum.ptr(), scale_val, scale_val2);
    },
    input, accum);
}

NEAccumulateSquaredKernel::NEAccumulateSquaredKernel()
    : _shift(0)
{
}

void NEAccumulateSquaredKernel::configure(const ITensor *input, uint32_t shift, ITensor *accum)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(input, accum);

    set_shape_if_empty(*accum->info(), input->info()->tensor_shape());

    set_format_if_unknown(*accum->info(), Format::S16);

    ARM_COMPUTE_ERROR_ON_MISMATCHING_SHAPES(input, accum);
    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U8);
    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(accum, 1, DataType::S16);
    ARM_COMPUTE_ERROR_ON(shift > 15);

    _shift = shift;

    constexpr unsigned int num_elems_processed_per_iteration = 16;
    INESimpleKernel::configure(input, accum, num_elems_processed_per_iteration);
}

void NEAccumulateSquaredKernel::run(const Window &window, const ThreadInfo &info)
{
    ARM_COMPUTE_UNUSED(info);
    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(INEKernel::window(), window);
    Iterator input(_input, window);
    Iterator accum(_output, window);

    execute_window_loop(window, [&](const Coordinates &)
    {
        acc_sq_v16_u8(input.ptr(), _shift, accum.ptr());
    },
    input, accum);
}
} // namespace arm_compute
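
A minimal usage sketch (not part of this file), assuming the public runtime wrappers shipped with the same 21.02 release (NEAccumulate here; NEAccumulateWeighted and NEAccumulateSquared are driven the same way with their extra alpha/shift argument). The tensor shape and allocation flow are illustrative only:

#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/TensorShape.h"
#include "arm_compute/runtime/NEON/functions/NEAccumulate.h"
#include "arm_compute/runtime/Tensor.h"

using namespace arm_compute;

int main()
{
    // U8 input frame and S16 accumulator of the same (illustrative) shape.
    Tensor frame;
    Tensor accum;
    frame.allocator()->init(TensorInfo(TensorShape(640U, 480U), Format::U8));
    accum.allocator()->init(TensorInfo(TensorShape(640U, 480U), Format::S16));
    frame.allocator()->allocate();
    accum.allocator()->allocate();

    // accum(S16) += frame(U8) with signed 16-bit saturation; NEAccumulateKernel does the work.
    NEAccumulate acc;
    acc.configure(&frame, &accum);
    acc.run();

    return 0;
}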