#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
namespace fp16
{
inline void mask_top(const float16x8_t &vc, const float16x8_t &in0, const float16x8_t &in1, uint16x8_t &mask)
{
    // Keep the candidate if it is >= its top-left, top and top-right neighbours.
    // in0 holds the row above starting one pixel to the left; vextq_f16 shifts it
    // to line up the centre and right neighbours.
    mask = vandq_u16(mask, vcgeq_f16(vc, in0));
    mask = vandq_u16(mask, vcgeq_f16(vc, vextq_f16(in0, in1, 1)));
    mask = vandq_u16(mask, vcgeq_f16(vc, vextq_f16(in0, in1, 2)));
}

inline void mask_middle(const float16x8_t &vc, const float16x8_t &in0, const float16x8_t &in1, uint16x8_t &mask)
{
    // Keep the candidate if it is >= its left neighbour and strictly > its right neighbour
    mask = vandq_u16(mask, vcgeq_f16(vc, in0));
    mask = vandq_u16(mask, vcgtq_f16(vc, vextq_f16(in0, in1, 2)));
}

inline void mask_bottom(const float16x8_t &vc, const float16x8_t &in0, const float16x8_t &in1, uint16x8_t &mask)
{
    // Keep the candidate only if it is strictly > its bottom-left, bottom and bottom-right neighbours
    mask = vandq_u16(mask, vcgtq_f16(vc, in0));
    mask = vandq_u16(mask, vcgtq_f16(vc, vextq_f16(in0, in1, 1)));
    mask = vandq_u16(mask, vcgtq_f16(vc, vextq_f16(in0, in1, 2)));
}
inline void non_maxima_suppression3x3_F32_F32(const void *__restrict in_ptr, void *__restrict out_ptr, const uint32_t in_stride)
{
    auto       in  = static_cast<const float *__restrict>(in_ptr) - 1;
    const auto out = static_cast<float *__restrict>(out_ptr);

    // Get centre scores: 16 F32 values converted to two float16x8_t vectors
    const float16x8x2_t vc =
    {
        {
            vcombine_f16(vcvt_f16_f32(vld1q_f32(in + 1)), vcvt_f16_f32(vld1q_f32(in + 5))),
            vcombine_f16(vcvt_f16_f32(vld1q_f32(in + 9)), vcvt_f16_f32(vld1q_f32(in + 13)))
        }
    };
    // Start from an all-ones mask and progressively AND in the neighbour comparisons
    static const float16x4_t  zero_f16x4 = vdup_n_f16(0);
    static const uint16x8_t   zero_u16   = vdupq_n_u16(0);
    static const uint16x8_t   true_mask  = vceqq_u16(zero_u16, zero_u16);
    static const uint16x8x2_t true_mask_x2 =
    {
        {
            true_mask,
            true_mask
        }
    };

    uint16x8x2_t mask = true_mask_x2;
    // Top row of the 3x3 neighbourhood
    in -= in_stride;
    const float16x8_t tmp_top0 = vcombine_f16(vcvt_f16_f32(vld1q_f32(in)), vcvt_f16_f32(vld1q_f32(in + 4)));
    const float16x8_t tmp_top1 = vcombine_f16(vcvt_f16_f32(vld1q_f32(in + 8)), vcvt_f16_f32(vld1q_f32(in + 12)));
    const float16x8_t tmp_top2 = vcombine_f16(vcvt_f16_f32(vld1q_f32(in + 16)), zero_f16x4);

    mask_top(vc.val[0], tmp_top0, tmp_top1, mask.val[0]);
    mask_top(vc.val[1], tmp_top1, tmp_top2, mask.val[1]);
    // Middle row (the row containing the candidates)
    in += in_stride;
    const float16x8_t tmp_mid0 = vcombine_f16(vcvt_f16_f32(vld1q_f32(in)), vcvt_f16_f32(vld1q_f32(in + 4)));
    const float16x8_t tmp_mid1 = vcombine_f16(vcvt_f16_f32(vld1q_f32(in + 8)), vcvt_f16_f32(vld1q_f32(in + 12)));
    const float16x8_t tmp_mid2 = vcombine_f16(vcvt_f16_f32(vld1q_f32(in + 16)), zero_f16x4);

    mask_middle(vc.val[0], tmp_mid0, tmp_mid1, mask.val[0]);
    mask_middle(vc.val[1], tmp_mid1, tmp_mid2, mask.val[1]);
    // Bottom row of the 3x3 neighbourhood
    in += in_stride;
    const float16x8_t tmp_bot0 = vcombine_f16(vcvt_f16_f32(vld1q_f32(in)), vcvt_f16_f32(vld1q_f32(in + 4)));
    const float16x8_t tmp_bot1 = vcombine_f16(vcvt_f16_f32(vld1q_f32(in + 8)), vcvt_f16_f32(vld1q_f32(in + 12)));
    const float16x8_t tmp_bot2 = vcombine_f16(vcvt_f16_f32(vld1q_f32(in + 16)), zero_f16x4);

    mask_bottom(vc.val[0], tmp_bot0, tmp_bot1, mask.val[0]);
    mask_bottom(vc.val[1], tmp_bot1, tmp_bot2, mask.val[1]);
    // Zero out the suppressed candidates and store the result back as F32
    static const float16x8_t zero_f16x8 = vdupq_n_f16(0);

    const float16x8_t suppressed0 = vbslq_f16(mask.val[0], vc.val[0], zero_f16x8);
    vst1q_f32(out + 0, vcvt_f32_f16(vget_low_f16(suppressed0)));
    vst1q_f32(out + 4, vcvt_f32_f16(vget_high_f16(suppressed0)));

    const float16x8_t suppressed1 = vbslq_f16(mask.val[1], vc.val[1], zero_f16x8);
    vst1q_f32(out + 8, vcvt_f32_f16(vget_low_f16(suppressed1)));
    vst1q_f32(out + 12, vcvt_f32_f16(vget_high_f16(suppressed1)));
}
inline void non_maxima_suppression3x3_U8_U8(const void *__restrict in_ptr, void *__restrict out_ptr, const uint32_t in_stride)
{
    auto       in  = static_cast<const uint8_t *__restrict>(in_ptr) - 1;
    const auto out = static_cast<uint8_t *__restrict>(out_ptr);

    // Get centre scores
    const uint8x16_t vc = vld1q_u8(in + 1);

    // Top row of the 3x3 neighbourhood
    in -= in_stride;
    const uint8x16_t l_nc_0 = vld1q_u8(in);
    const uint8x16_t m_nc_0 = vld1q_u8(in + 1);
    const uint8x16_t r_nc_0 = vld1q_u8(in + 2);

    // mask = vc >= top-left, vc >= top, vc >= top-right
    uint8x16_t mask = vcgeq_u8(vc, l_nc_0);
    mask            = vandq_u8(mask, vcgeq_u8(vc, m_nc_0));
    mask            = vandq_u8(mask, vcgeq_u8(vc, r_nc_0));

    // Middle row
    in += in_stride;
    const uint8x16_t l_nc_1 = vld1q_u8(in);
    const uint8x16_t r_nc_1 = vld1q_u8(in + 2);

    // mask &= vc >= left, vc > right
    mask = vandq_u8(mask, vcgeq_u8(vc, l_nc_1));
    mask = vandq_u8(mask, vcgtq_u8(vc, r_nc_1));

    // Bottom row
    in += in_stride;
    const uint8x16_t l_nc_2 = vld1q_u8(in);
    const uint8x16_t m_nc_2 = vld1q_u8(in + 1);
    const uint8x16_t r_nc_2 = vld1q_u8(in + 2);

    // mask &= vc > bottom-left, vc > bottom, vc > bottom-right
    mask = vandq_u8(mask, vcgtq_u8(vc, l_nc_2));
    mask = vandq_u8(mask, vcgtq_u8(vc, m_nc_2));
    mask = vandq_u8(mask, vcgtq_u8(vc, r_nc_2));

    // Zero out the suppressed candidates and store
    static const uint8x16_t zero = vdupq_n_u8(0);
    vst1q_u8(out, vbslq_u8(mask, vc, zero));
}
} // namespace fp16
void NENonMaximaSuppression3x3FP16Kernel::configure(const ITensor *input, ITensor *output, bool border_undefined)
{
    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U8, DataType::F32);
    ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);

    _input  = input;
    _output = output;

    // Select the vectorised implementation matching the input data type
    switch(input->info()->data_type())
    {
        case DataType::U8:
            _func = &fp16::non_maxima_suppression3x3_U8_U8;
            break;
        default:
            _func = &fp16::non_maxima_suppression3x3_F32_F32;
            break;
    }

    // The F32 path reads 3 extra elements per iteration because of the F32->F16 conversion loads
    const unsigned int     num_elems_read_per_iteration      = 16 + 2 * border_size().left + (input->info()->data_type() == DataType::U8 ? 0 : 3);
    constexpr unsigned int num_elems_processed_per_iteration = 16;
    constexpr unsigned int num_elems_written_per_iteration   = 16;
    constexpr unsigned int num_rows_read_per_iteration       = 3;

    // Configure kernel window and padding
    Window                 win = calculate_max_window(*input->info(), Steps(num_elems_processed_per_iteration), border_undefined, border_size());
    AccessWindowHorizontal output_access(output->info(), 0, num_elems_written_per_iteration);

    update_window_and_padding(win,
                              AccessWindowRectangle(input->info(), -border_size().left, -border_size().top, num_elems_read_per_iteration, num_rows_read_per_iteration),
                              output_access);

    output_access.set_valid_region(win, input->info()->valid_region(), border_undefined, border_size());

    INEKernel::configure(win);
}
#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
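For reference, the predicate that both the FP16 paths above and the F32/U8 paths below implement can be written as a scalar expression: a candidate survives only if it is greater than or equal to its top-row and left neighbours and strictly greater than its right and bottom-row neighbours, so exactly one pixel in a plateau of equal scores is kept. The helper below is an illustrative sketch only; its name and plain-pointer interface are assumptions and not part of this file.

// Hypothetical scalar equivalent of the vectorised 3x3 non-maxima suppression predicate
inline float nms3x3_scalar(const float *row_above, const float *row, const float *row_below, int x)
{
    const float vc   = row[x];
    const bool  keep = (vc >= row_above[x - 1]) && (vc >= row_above[x]) && (vc >= row_above[x + 1]) && // top row: >=
                       (vc >= row[x - 1]) && (vc > row[x + 1]) &&                                      // middle row: >= left, > right
                       (vc > row_below[x - 1]) && (vc > row_below[x]) && (vc > row_below[x + 1]);      // bottom row: strictly >
    return keep ? vc : 0.0f; // suppressed candidates are written as zero
}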
inline void non_maxima_suppression3x3_FLOAT_FLOAT(const void *__restrict input_ptr, void *__restrict output_ptr, const uint32_t input_stride)
{
    auto       input  = static_cast<const float *__restrict>(input_ptr) - 1;
    const auto output = static_cast<float *__restrict>(output_ptr);

    // Get centre scores
    const float32x4x4_t vc =
    {
        {
            vld1q_f32(input + 1),
            vld1q_f32(input + 5),
            vld1q_f32(input + 9),
            vld1q_f32(input + 13)
        }
    };
    // Neighbouring pixels
    float32x4x4_t l_nc{ {} };
    float32x4x4_t m_nc{ {} };
    float32x4x4_t r_nc{ {} };

    // Top row of the 3x3 neighbourhood
    input -= input_stride;

    // Row0 - Low part
    float32x4_t tmp_low   = vld1q_f32(input);
    float32x4_t tmp_high  = vld1q_f32(input + 4);
    float32x4_t tmp_high1 = vld1q_f32(input + 8);

    l_nc.val[0] = tmp_low;
    m_nc.val[0] = vextq_f32(tmp_low, tmp_high, 1);
    r_nc.val[0] = vextq_f32(tmp_low, tmp_high, 2);

    tmp_low  = tmp_high;
    tmp_high = tmp_high1;

    l_nc.val[1] = tmp_low;
    m_nc.val[1] = vextq_f32(tmp_low, tmp_high, 1);
    r_nc.val[1] = vextq_f32(tmp_low, tmp_high, 2);

    // Row0 - High part
    tmp_low   = tmp_high1;
    tmp_high  = vld1q_f32(input + 12);
    tmp_high1 = vld1q_f32(input + 16);

    l_nc.val[2] = tmp_low;
    m_nc.val[2] = vextq_f32(tmp_low, tmp_high, 1);
    r_nc.val[2] = vextq_f32(tmp_low, tmp_high, 2);

    tmp_low  = tmp_high;
    tmp_high = tmp_high1;

    l_nc.val[3] = tmp_low;
    m_nc.val[3] = vextq_f32(tmp_low, tmp_high, 1);
    r_nc.val[3] = vextq_f32(tmp_low, tmp_high, 2);
    // mask = vc >= top-left, vc >= top, vc >= top-right
    uint32x4x4_t mask{ {} };
    mask.val[0] = vcgeq_f32(vc.val[0], l_nc.val[0]);
    mask.val[0] = vandq_u32(mask.val[0], vcgeq_f32(vc.val[0], m_nc.val[0]));
    mask.val[0] = vandq_u32(mask.val[0], vcgeq_f32(vc.val[0], r_nc.val[0]));
    mask.val[1] = vcgeq_f32(vc.val[1], l_nc.val[1]);
    mask.val[1] = vandq_u32(mask.val[1], vcgeq_f32(vc.val[1], m_nc.val[1]));
    mask.val[1] = vandq_u32(mask.val[1], vcgeq_f32(vc.val[1], r_nc.val[1]));
    mask.val[2] = vcgeq_f32(vc.val[2], l_nc.val[2]);
    mask.val[2] = vandq_u32(mask.val[2], vcgeq_f32(vc.val[2], m_nc.val[2]));
    mask.val[2] = vandq_u32(mask.val[2], vcgeq_f32(vc.val[2], r_nc.val[2]));
    mask.val[3] = vcgeq_f32(vc.val[3], l_nc.val[3]);
    mask.val[3] = vandq_u32(mask.val[3], vcgeq_f32(vc.val[3], m_nc.val[3]));
    mask.val[3] = vandq_u32(mask.val[3], vcgeq_f32(vc.val[3], r_nc.val[3]));
    // Middle row (the row containing the candidates)
    input += input_stride;

    // Row1 - Low part
    tmp_low   = vld1q_f32(input);
    tmp_high  = vld1q_f32(input + 4);
    tmp_high1 = vld1q_f32(input + 8);

    l_nc.val[0] = tmp_low;
    r_nc.val[0] = vextq_f32(tmp_low, tmp_high, 2);

    tmp_low  = tmp_high;
    tmp_high = tmp_high1;

    l_nc.val[1] = tmp_low;
    r_nc.val[1] = vextq_f32(tmp_low, tmp_high, 2);

    // Row1 - High part
    tmp_low   = tmp_high1;
    tmp_high  = vld1q_f32(input + 12);
    tmp_high1 = vld1q_f32(input + 16);

    l_nc.val[2] = tmp_low;
    r_nc.val[2] = vextq_f32(tmp_low, tmp_high, 2);

    tmp_low  = tmp_high;
    tmp_high = tmp_high1;

    l_nc.val[3] = tmp_low;
    r_nc.val[3] = vextq_f32(tmp_low, tmp_high, 2);
    // mask &= vc >= left neighbour, vc > right neighbour
    mask.val[0] = vandq_u32(mask.val[0], vcgeq_f32(vc.val[0], l_nc.val[0]));
    mask.val[0] = vandq_u32(mask.val[0], vcgtq_f32(vc.val[0], r_nc.val[0]));
    mask.val[1] = vandq_u32(mask.val[1], vcgeq_f32(vc.val[1], l_nc.val[1]));
    mask.val[1] = vandq_u32(mask.val[1], vcgtq_f32(vc.val[1], r_nc.val[1]));
    mask.val[2] = vandq_u32(mask.val[2], vcgeq_f32(vc.val[2], l_nc.val[2]));
    mask.val[2] = vandq_u32(mask.val[2], vcgtq_f32(vc.val[2], r_nc.val[2]));
    mask.val[3] = vandq_u32(mask.val[3], vcgeq_f32(vc.val[3], l_nc.val[3]));
    mask.val[3] = vandq_u32(mask.val[3], vcgtq_f32(vc.val[3], r_nc.val[3]));
    // Bottom row of the 3x3 neighbourhood
    input += input_stride;

    // Row2 - Low part
    tmp_low   = vld1q_f32(input);
    tmp_high  = vld1q_f32(input + 4);
    tmp_high1 = vld1q_f32(input + 8);

    l_nc.val[0] = tmp_low;
    m_nc.val[0] = vextq_f32(tmp_low, tmp_high, 1);
    r_nc.val[0] = vextq_f32(tmp_low, tmp_high, 2);

    tmp_low  = tmp_high;
    tmp_high = tmp_high1;

    l_nc.val[1] = tmp_low;
    m_nc.val[1] = vextq_f32(tmp_low, tmp_high, 1);
    r_nc.val[1] = vextq_f32(tmp_low, tmp_high, 2);

    // Row2 - High part
    tmp_low   = tmp_high1;
    tmp_high  = vld1q_f32(input + 12);
    tmp_high1 = vld1q_f32(input + 16);

    l_nc.val[2] = tmp_low;
    m_nc.val[2] = vextq_f32(tmp_low, tmp_high, 1);
    r_nc.val[2] = vextq_f32(tmp_low, tmp_high, 2);

    tmp_low  = tmp_high;
    tmp_high = tmp_high1;

    l_nc.val[3] = tmp_low;
    m_nc.val[3] = vextq_f32(tmp_low, tmp_high, 1);
    r_nc.val[3] = vextq_f32(tmp_low, tmp_high, 2);
    // mask &= vc > bottom-left, vc > bottom, vc > bottom-right
    mask.val[0] = vandq_u32(mask.val[0], vcgtq_f32(vc.val[0], l_nc.val[0]));
    mask.val[0] = vandq_u32(mask.val[0], vcgtq_f32(vc.val[0], m_nc.val[0]));
    mask.val[0] = vandq_u32(mask.val[0], vcgtq_f32(vc.val[0], r_nc.val[0]));
    mask.val[1] = vandq_u32(mask.val[1], vcgtq_f32(vc.val[1], l_nc.val[1]));
    mask.val[1] = vandq_u32(mask.val[1], vcgtq_f32(vc.val[1], m_nc.val[1]));
    mask.val[1] = vandq_u32(mask.val[1], vcgtq_f32(vc.val[1], r_nc.val[1]));
    mask.val[2] = vandq_u32(mask.val[2], vcgtq_f32(vc.val[2], l_nc.val[2]));
    mask.val[2] = vandq_u32(mask.val[2], vcgtq_f32(vc.val[2], m_nc.val[2]));
    mask.val[2] = vandq_u32(mask.val[2], vcgtq_f32(vc.val[2], r_nc.val[2]));
    mask.val[3] = vandq_u32(mask.val[3], vcgtq_f32(vc.val[3], l_nc.val[3]));
    mask.val[3] = vandq_u32(mask.val[3], vcgtq_f32(vc.val[3], m_nc.val[3]));
    mask.val[3] = vandq_u32(mask.val[3], vcgtq_f32(vc.val[3], r_nc.val[3]));
    // Zero out the suppressed candidates and store
    static const float32x4_t zero = vdupq_n_f32(0.f);

    vst1q_f32(output + 0, vbslq_f32(mask.val[0], vc.val[0], zero));
    vst1q_f32(output + 4, vbslq_f32(mask.val[1], vc.val[1], zero));
    vst1q_f32(output + 8, vbslq_f32(mask.val[2], vc.val[2], zero));
    vst1q_f32(output + 12, vbslq_f32(mask.val[3], vc.val[3], zero));
}
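The repeated vextq_f32 calls above implement a sliding window: because the input pointer is already shifted one element to the left, the raw load is the left-neighbour vector, and extracting across two adjacent loads with offsets 1 and 2 yields the centre and right neighbours without extra memory traffic. A minimal standalone illustration follows; it needs a NEON-capable target and the array contents are arbitrary.

#include <arm_neon.h>
#include <cstdio>

int main()
{
    // Eight consecutive pixels of one row, loaded as two float32x4_t vectors
    const float row[8] = { 0.f, 1.f, 2.f, 3.f, 4.f, 5.f, 6.f, 7.f };

    const float32x4_t lo = vld1q_f32(row);     // pixels 0..3 (left neighbours)
    const float32x4_t hi = vld1q_f32(row + 4); // pixels 4..7

    const float32x4_t middle = vextq_f32(lo, hi, 1); // {1, 2, 3, 4}
    const float32x4_t right  = vextq_f32(lo, hi, 2); // {2, 3, 4, 5}

    float m[4], r[4];
    vst1q_f32(m, middle);
    vst1q_f32(r, right);
    std::printf("middle: %g %g %g %g\n", m[0], m[1], m[2], m[3]);
    std::printf("right : %g %g %g %g\n", r[0], r[1], r[2], r[3]);
    return 0;
}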
inline void non_maxima_suppression3x3_U8_U8(const void *__restrict input_ptr, void *__restrict output_ptr, const uint32_t input_stride)
{
    auto       input  = static_cast<const uint8_t *__restrict>(input_ptr) - 1;
    const auto output = static_cast<uint8_t *__restrict>(output_ptr);

    // Get centre scores
    const uint8x16_t vc = vld1q_u8(input + 1);

    // Neighbouring pixels
    uint8x16_t l_nc{};
    uint8x16_t m_nc{};
    uint8x16_t r_nc{};
    // Top row of the 3x3 neighbourhood
    input -= input_stride;
    l_nc = vld1q_u8(input);
    m_nc = vld1q_u8(input + 1);
    r_nc = vld1q_u8(input + 2);

    // mask = vc >= top-left, vc >= top, vc >= top-right
    uint8x16_t mask = vcgeq_u8(vc, l_nc);
    mask            = vandq_u8(mask, vcgeq_u8(vc, m_nc));
    mask            = vandq_u8(mask, vcgeq_u8(vc, r_nc));

    // Middle row
    input += input_stride;
    l_nc = vld1q_u8(input);
    r_nc = vld1q_u8(input + 2);

    // mask &= vc >= left, vc > right
    mask = vandq_u8(mask, vcgeq_u8(vc, l_nc));
    mask = vandq_u8(mask, vcgtq_u8(vc, r_nc));

    // Bottom row
    input += input_stride;
    l_nc = vld1q_u8(input);
    m_nc = vld1q_u8(input + 1);
    r_nc = vld1q_u8(input + 2);

    // mask &= vc > bottom-left, vc > bottom, vc > bottom-right
    mask = vandq_u8(mask, vcgtq_u8(vc, l_nc));
    mask = vandq_u8(mask, vcgtq_u8(vc, m_nc));
    mask = vandq_u8(mask, vcgtq_u8(vc, r_nc));

    // Zero out the suppressed candidates and store
    static const uint8x16_t zero = vdupq_n_u8(0);
    vst1q_u8(output, vbslq_u8(mask, vc, zero));
}
NENonMaximaSuppression3x3Kernel::NENonMaximaSuppression3x3Kernel()
    : _func(nullptr), _input(nullptr), _output(nullptr)
{
}

void NENonMaximaSuppression3x3Kernel::configure(const ITensor *input, ITensor *output, bool border_undefined)
{
    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U8, DataType::F32);
    ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);

    _input  = input;
    _output = output;

    // Select the implementation matching the input data type
    if(input->info()->data_type() == DataType::U8)
    {
        _func = &non_maxima_suppression3x3_U8_U8;
    }
    else
    {
        _func = &non_maxima_suppression3x3_FLOAT_FLOAT;
    }

    constexpr unsigned int num_elems_processed_per_iteration = 16;
    const unsigned int     num_elems_read_per_iteration      = 16 + 2 * border_size().left + (input->info()->data_type() == DataType::U8 ? 0 : 3);
    constexpr unsigned int num_elems_written_per_iteration   = 16;
    constexpr unsigned int num_rows_read_per_iteration       = 3;

    // Configure kernel window and padding (same pattern as the FP16 variant above)
    Window                 win = calculate_max_window(*input->info(), Steps(num_elems_processed_per_iteration), border_undefined, border_size());
    AccessWindowHorizontal output_access(output->info(), 0, num_elems_written_per_iteration);

    update_window_and_padding(win,
                              AccessWindowRectangle(input->info(), -border_size().left, -border_size().top, num_elems_read_per_iteration, num_rows_read_per_iteration),
                              output_access);

    output_access.set_valid_region(win, input->info()->valid_region(), border_undefined, border_size());

    INEKernel::configure(win);
}
void NENonMaximaSuppression3x3Kernel::run(const Window &window, const ThreadInfo &info)
{
    ARM_COMPUTE_UNUSED(info);
    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(INEKernel::window(), window);
    ARM_COMPUTE_ERROR_ON(_func == nullptr);

    Iterator input(_input, window);
    Iterator output(_output, window);

    // Row stride of the input expressed in elements rather than bytes
    const size_t input_stride = _input->info()->strides_in_bytes()[1] / element_size_from_data_type(_input->info()->data_type());

    execute_window_loop(window, [&](const Coordinates &)
    {
        _func(input.ptr(), output.ptr(), input_stride);
    },
    input, output);
}
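A typical way to exercise this kernel is through the runtime wrapper rather than calling run() directly. The sketch below assumes the usual Arm Compute Library runtime headers and the NENonMaximaSuppression3x3 function that wraps this kernel; the tensor shape and border mode are placeholder choices, not values taken from this file.

#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/runtime/NEON/functions/NENonMaximaSuppression3x3.h"
#include "arm_compute/runtime/Tensor.h"

using namespace arm_compute;

void run_nms_example()
{
    Tensor src, dst;
    src.allocator()->init(TensorInfo(TensorShape(640U, 480U), 1, DataType::F32));
    dst.allocator()->init(TensorInfo(TensorShape(640U, 480U), 1, DataType::F32));

    NENonMaximaSuppression3x3 nms;
    // BorderMode::UNDEFINED corresponds to border_undefined == true in the kernel's configure()
    nms.configure(&src, &dst, BorderMode::UNDEFINED);

    src.allocator()->allocate();
    dst.allocator()->allocate();

    // ... fill src with scores here, then run the suppression ...
    nms.run();
}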