/** Count how many of the next four (mapx, mapy) coordinate pairs fall outside the tensor.
 *
 * Each in-bounds pair contributes 0 and each out-of-bounds pair contributes -1,
 * so the return value is minus the number of out-of-range pairs (range [-4, 0]).
 *
 * @param[in] mapx_ptr Pointer to four consecutive float x-coordinates.
 * @param[in] mapy_ptr Pointer to four consecutive float y-coordinates.
 * @param[in] width_1  Vector with the maximum valid x index (width - 1) in every lane.
 * @param[in] height_1 Vector with the maximum valid y index (height - 1) in every lane.
 *
 * @return 0 if all four pairs are inside the tensor, otherwise -(number of out-of-range pairs).
 */
inline int32_t num_out_of_tensor(const float *mapx_ptr, const float *mapy_ptr, const int32x4_t &width_1, const int32x4_t &height_1)
{
    // Truncate the floating point coordinates towards zero.
    const int32x4_t mapx_s32 = vcvtq_s32_f32(vld1q_f32(mapx_ptr));
    const int32x4_t mapy_s32 = vcvtq_s32_f32(vld1q_f32(mapy_ptr));

    // min(max_idx - c, c) is negative iff c < 0 or c > max_idx; clamping the result to
    // [-1, 0] turns every lane into -1 (out of bounds) or 0 (in bounds).
    const int32x4_t outbx_s32 = vminq_s32(vmaxq_s32(vminq_s32(vsubq_s32(width_1, mapx_s32), mapx_s32), vdupq_n_s32(-1)), vdupq_n_s32(0));
    const int32x4_t outby_s32 = vminq_s32(vmaxq_s32(vminq_s32(vsubq_s32(height_1, mapy_s32), mapy_s32), vdupq_n_s32(-1)), vdupq_n_s32(0));

    // A pair is outside the tensor if either of its coordinates is.
    const int32x4_t out_of_tensor_v = vminq_s32(outbx_s32, outby_s32);
#if defined(__aarch64__)
    // Horizontal add across all four lanes.
    return vaddvq_s32(out_of_tensor_v);
#else  // defined(__aarch64__)
    return vgetq_lane_s32(out_of_tensor_v, 0) + vgetq_lane_s32(out_of_tensor_v, 1) + vgetq_lane_s32(out_of_tensor_v, 2) + vgetq_lane_s32(out_of_tensor_v, 3);
#endif // defined(__aarch64__)
}
/** Remap one pixel using nearest-neighbour interpolation.
 *
 * Truncates the (x, y) source coordinate towards zero (matching the vectorised
 * vcvtq_s32_f32 path) and copies the addressed source pixel to @p out_ptr.
 * Coordinates outside the source tensor produce @p constant_border_value instead.
 *
 * @param[in]  in_ptr                Pointer to the source tensor's first pixel.
 * @param[in]  mapx_ptr              Pointer to the source x-coordinate of this pixel.
 * @param[in]  mapy_ptr              Pointer to the source y-coordinate of this pixel.
 * @param[out] out_ptr               Pointer to the destination pixel.
 * @param[in]  width_val             Source tensor width in pixels.
 * @param[in]  height_val            Source tensor height in pixels.
 * @param[in]  in_stride_val         Source row stride in elements.
 * @param[in]  constant_border_value Value written for out-of-bounds coordinates.
 */
inline void serial_remap_nearest_interpolation(const uint8_t *in_ptr, const float *mapx_ptr, const float *mapy_ptr, uint8_t *out_ptr,
                                               int32_t width_val, int32_t height_val, int32_t in_stride_val, uint8_t constant_border_value)
{
    // static_cast truncates towards zero, so e.g. -0.5f maps to column 0.
    const auto x_s32 = static_cast<int32_t>(*mapx_ptr);
    const auto y_s32 = static_cast<int32_t>(*mapy_ptr);
    if(x_s32 < 0 || y_s32 < 0 || x_s32 >= width_val || y_s32 >= height_val)
    {
        *(out_ptr) = constant_border_value;
    }
    else
    {
        *(out_ptr) = in_ptr[x_s32 + y_s32 * in_stride_val];
    }
}
/** Compute linear source offsets (x + y * stride) for four coordinate pairs at once.
 *
 * Coordinates are truncated towards zero; callers must have verified all four
 * pairs are inside the tensor (see num_out_of_tensor) before indexing with the result.
 *
 * @param[in] mapx_ptr Pointer to four consecutive float x-coordinates.
 * @param[in] mapy_ptr Pointer to four consecutive float y-coordinates.
 * @param[in] stride   Vector with the source row stride (in elements) in every lane.
 *
 * @return Four int32 offsets, one per coordinate pair.
 */
inline int32x4_t offset_nearest_interpolation(const float *mapx_ptr, const float *mapy_ptr, const int32x4_t &stride)
{
    const int32x4_t mapx_s32 = vcvtq_s32_f32(vld1q_f32(mapx_ptr));
    const int32x4_t mapy_s32 = vcvtq_s32_f32(vld1q_f32(mapy_ptr));
    // vmlaq_s32: mapx + mapy * stride, per lane.
    return vmlaq_s32(mapx_s32, mapy_s32, stride);
}
/** Sample one single-channel pixel with bilinear interpolation and a constant border.
 *
 * The coordinate is first clamped to [-1, width] x [-1, height]; any of the four
 * neighbouring taps that falls outside the tensor reads @p constant_border_value.
 *
 * @param[in] pixel_ptr             Pointer to the source tensor's first pixel.
 * @param[in] stride                Source row stride in elements.
 * @param[in] width                 Source tensor width in pixels.
 * @param[in] height                Source tensor height in pixels.
 * @param[in] x                     Source x-coordinate (may be out of range).
 * @param[in] y                     Source y-coordinate (may be out of range).
 * @param[in] constant_border_value Value used for out-of-bounds taps.
 *
 * @return The interpolated pixel value, truncated to uint8_t.
 */
inline uint8_t pixel_bilinear_c1_clamp(const uint8_t *pixel_ptr, int32_t stride, int32_t width, int32_t height,
                                       float x, float y, uint8_t constant_border_value)
{
    x = std::max(-1.f, std::min(x, static_cast<float>(width)));
    y = std::max(-1.f, std::min(y, static_cast<float>(height)));

    const int32_t xi = static_cast<int32_t>(std::floor(x));
    const int32_t yi = static_cast<int32_t>(std::floor(y));

    // Fractional distances to the top-left tap; always in [0, 1).
    const float dx = x - static_cast<float>(xi);
    const float dy = y - static_cast<float>(yi);

    // After the clamp above, xi >= -1 and yi >= -1, so xi + 1 and yi + 1 can never be
    // negative: the a01/a10/a11 checks only need the upper bound (plus whichever base
    // index they reuse). Out-of-bounds taps read the constant border value.
    const uint8_t *a00 = (xi < 0 || xi >= width || yi < 0 || yi >= height) ? &constant_border_value : (pixel_ptr + xi + yi * stride);
    const uint8_t *a01 = (xi + 1 >= width || yi < 0 || yi >= height) ? &constant_border_value : (pixel_ptr + xi + 1 + yi * stride);
    const uint8_t *a10 = (xi < 0 || xi >= width || yi + 1 >= height) ? &constant_border_value : (pixel_ptr + xi + yi * stride + stride);
    const uint8_t *a11 = (xi + 1 >= width || yi + 1 >= height) ? &constant_border_value : (pixel_ptr + xi + 1 + yi * stride + stride);

    // Standard bilinear weights; they sum to 1.
    const float dx1 = 1.0f - dx;
    const float dy1 = 1.0f - dy;
    const float w1  = dx1 * dy1;
    const float w2  = dx * dy1;
    const float w3  = dx1 * dy;
    const float w4  = dx * dy;

    return static_cast<uint8_t>((*a00) * w1 + (*a01) * w2 + (*a10) * w3 + (*a11) * w4);
}
118 : _func(nullptr), _input(nullptr), _output(nullptr), _map_x(nullptr), _map_y(nullptr), _border_mode(
BorderMode::
UNDEFINED), _constant_border_value(0)
133 _border_mode = border_mode;
134 _constant_border_value = constant_border_value;
140 _func = &NERemapKernel::remap_nearest;
145 _func = &NERemapKernel::remap_bilinear;
166 const auto window_start_x =
static_cast<int32_t
>(window.
x().
start());
167 const auto window_end_x =
static_cast<int32_t
>(window.
x().
end());
168 const int32_t window_step_x = 8;
179 const int32_t width_val =
static_cast<int32_t
>(_input->
info()->
dimension(0));
180 const int32_t height_val =
static_cast<int32_t
>(_input->
info()->
dimension(1));
182 const int32x4_t width_1 = vdupq_n_s32(width_val - 1);
183 const int32x4_t height_1 = vdupq_n_s32(height_val - 1);
184 const int32x4_t in_stride = vdupq_n_s32(in_stride_val);
188 auto mapx_ptr =
reinterpret_cast<const float *
>(mapx.
ptr());
189 auto mapy_ptr =
reinterpret_cast<const float *
>(mapy.
ptr());
190 const uint8_t *in_ptr = in.
ptr();
191 uint8_t *out_ptr = out.
ptr();
192 int32_t x = window_start_x;
193 for(; x < window_end_x - window_step_x; x += window_step_x, mapx_ptr += window_step_x, mapy_ptr += window_step_x, out_ptr += window_step_x)
195 const int32_t out_of_tensor0 = num_out_of_tensor(mapx_ptr, mapy_ptr + 0, width_1, height_1);
196 const int32_t out_of_tensor1 = num_out_of_tensor(mapx_ptr + 4, mapy_ptr + 4, width_1, height_1);
197 const int32_t out_of_tensor = out_of_tensor0 + out_of_tensor1;
199 if(out_of_tensor == -8)
202 uint8x8_t tmp = vdup_n_u8(_constant_border_value);
203 vst1_u8(out_ptr, tmp);
205 else if(out_of_tensor < 0)
208 serial_remap_nearest_interpolation(in_ptr, mapx_ptr, mapy_ptr, out_ptr, width_val, height_val, in_stride_val, _constant_border_value);
209 serial_remap_nearest_interpolation(in_ptr, mapx_ptr + 1, mapy_ptr + 1, out_ptr + 1, width_val, height_val, in_stride_val, _constant_border_value);
210 serial_remap_nearest_interpolation(in_ptr, mapx_ptr + 2, mapy_ptr + 2, out_ptr + 2, width_val, height_val, in_stride_val, _constant_border_value);
211 serial_remap_nearest_interpolation(in_ptr, mapx_ptr + 3, mapy_ptr + 3, out_ptr + 3, width_val, height_val, in_stride_val, _constant_border_value);
212 serial_remap_nearest_interpolation(in_ptr, mapx_ptr + 4, mapy_ptr + 4, out_ptr + 4, width_val, height_val, in_stride_val, _constant_border_value);
213 serial_remap_nearest_interpolation(in_ptr, mapx_ptr + 5, mapy_ptr + 5, out_ptr + 5, width_val, height_val, in_stride_val, _constant_border_value);
214 serial_remap_nearest_interpolation(in_ptr, mapx_ptr + 6, mapy_ptr + 6, out_ptr + 6, width_val, height_val, in_stride_val, _constant_border_value);
215 serial_remap_nearest_interpolation(in_ptr, mapx_ptr + 7, mapy_ptr + 7, out_ptr + 7, width_val, height_val, in_stride_val, _constant_border_value);
220 uint8x8_t tmp = vdup_n_u8(0);
221 const int32x4_t offset0 = offset_nearest_interpolation(mapx_ptr, mapy_ptr, in_stride);
222 const int32x4_t offset1 = offset_nearest_interpolation(mapx_ptr + 4, mapy_ptr + 4, in_stride);
223 tmp = vset_lane_u8(in_ptr[vgetq_lane_s32(offset0, 0)], tmp, 0);
224 tmp = vset_lane_u8(in_ptr[vgetq_lane_s32(offset0, 1)], tmp, 1);
225 tmp = vset_lane_u8(in_ptr[vgetq_lane_s32(offset0, 2)], tmp, 2);
226 tmp = vset_lane_u8(in_ptr[vgetq_lane_s32(offset0, 3)], tmp, 3);
227 tmp = vset_lane_u8(in_ptr[vgetq_lane_s32(offset1, 0)], tmp, 4);
228 tmp = vset_lane_u8(in_ptr[vgetq_lane_s32(offset1, 1)], tmp, 5);
229 tmp = vset_lane_u8(in_ptr[vgetq_lane_s32(offset1, 2)], tmp, 6);
230 tmp = vset_lane_u8(in_ptr[vgetq_lane_s32(offset1, 3)], tmp, 7);
231 vst1_u8(out_ptr, tmp);
234 for(; x < window_end_x; ++x, ++mapx_ptr, ++mapy_ptr, ++out_ptr)
236 serial_remap_nearest_interpolation(in_ptr, mapx_ptr, mapy_ptr, out_ptr, width_val, height_val, in_stride_val, _constant_border_value);
239 in, out, mapx, mapy);
242 void NERemapKernel::remap_bilinear(
const Window &window)
250 const auto window_start_x =
static_cast<int32_t
>(window.
x().
start());
251 const auto window_end_x =
static_cast<int32_t
>(window.
x().
end());
252 const int32_t window_step_x = 8;
263 const int32_t width_val =
static_cast<int32_t
>(_input->
info()->
dimension(0));
264 const int32_t height_val =
static_cast<int32_t
>(_input->
info()->
dimension(1));
265 const int32x4_t width_2 = vdupq_n_s32(width_val - 2);
266 const int32x4_t height_2 = vdupq_n_s32(height_val - 2);
271 auto mapx_ptr =
reinterpret_cast<const float *
>(mapx.
ptr());
272 auto mapy_ptr =
reinterpret_cast<const float *
>(mapy.
ptr());
273 const uint8_t *in_ptr = in.
ptr();
274 uint8_t *out_ptr = out.
ptr();
275 int32_t x = window_start_x;
276 for(; x < window_end_x - window_step_x; x += window_step_x, mapx_ptr += window_step_x, mapy_ptr += window_step_x, out_ptr += window_step_x)
278 const int32_t out_of_tensor0 = num_out_of_tensor(mapx_ptr, mapy_ptr + 0, width_2, height_2);
279 const int32_t out_of_tensor1 = num_out_of_tensor(mapx_ptr + 4, mapy_ptr + 4, width_2, height_2);
280 const int32_t out_of_tensor = out_of_tensor0 + out_of_tensor1;
282 if(out_of_tensor < 0)
285 *(out_ptr) = pixel_bilinear_c1_clamp(in_ptr, in_stride_val, width_val, height_val, mapx_ptr[0], mapy_ptr[0], _constant_border_value);
286 *(out_ptr + 1) = pixel_bilinear_c1_clamp(in_ptr, in_stride_val, width_val, height_val, mapx_ptr[1], mapy_ptr[1], _constant_border_value);
287 *(out_ptr + 2) = pixel_bilinear_c1_clamp(in_ptr, in_stride_val, width_val, height_val, mapx_ptr[2], mapy_ptr[2], _constant_border_value);
288 *(out_ptr + 3) = pixel_bilinear_c1_clamp(in_ptr, in_stride_val, width_val, height_val, mapx_ptr[3], mapy_ptr[3], _constant_border_value);
289 *(out_ptr + 4) = pixel_bilinear_c1_clamp(in_ptr, in_stride_val, width_val, height_val, mapx_ptr[4], mapy_ptr[4], _constant_border_value);
290 *(out_ptr + 5) = pixel_bilinear_c1_clamp(in_ptr, in_stride_val, width_val, height_val, mapx_ptr[5], mapy_ptr[5], _constant_border_value);
291 *(out_ptr + 6) = pixel_bilinear_c1_clamp(in_ptr, in_stride_val, width_val, height_val, mapx_ptr[6], mapy_ptr[6], _constant_border_value);
292 *(out_ptr + 7) = pixel_bilinear_c1_clamp(in_ptr, in_stride_val, width_val, height_val, mapx_ptr[7], mapy_ptr[7], _constant_border_value);
297 uint8x8_t tmp = vdup_n_u8(0);
298 tmp = vset_lane_u8(pixel_bilinear_c1_clamp(in_ptr, in_stride_val, width_val, height_val, mapx_ptr[0], mapy_ptr[0], _constant_border_value), tmp, 0);
299 tmp = vset_lane_u8(pixel_bilinear_c1_clamp(in_ptr, in_stride_val, width_val, height_val, mapx_ptr[1], mapy_ptr[1], _constant_border_value), tmp, 1);
300 tmp = vset_lane_u8(pixel_bilinear_c1_clamp(in_ptr, in_stride_val, width_val, height_val, mapx_ptr[2], mapy_ptr[2], _constant_border_value), tmp, 2);
301 tmp = vset_lane_u8(pixel_bilinear_c1_clamp(in_ptr, in_stride_val, width_val, height_val, mapx_ptr[3], mapy_ptr[3], _constant_border_value), tmp, 3);
302 tmp = vset_lane_u8(pixel_bilinear_c1_clamp(in_ptr, in_stride_val, width_val, height_val, mapx_ptr[4], mapy_ptr[4], _constant_border_value), tmp, 4);
303 tmp = vset_lane_u8(pixel_bilinear_c1_clamp(in_ptr, in_stride_val, width_val, height_val, mapx_ptr[5], mapy_ptr[5], _constant_border_value), tmp, 5);
304 tmp = vset_lane_u8(pixel_bilinear_c1_clamp(in_ptr, in_stride_val, width_val, height_val, mapx_ptr[6], mapy_ptr[6], _constant_border_value), tmp, 6);
305 tmp = vset_lane_u8(pixel_bilinear_c1_clamp(in_ptr, in_stride_val, width_val, height_val, mapx_ptr[7], mapy_ptr[7], _constant_border_value), tmp, 7);
306 vst1_u8(out_ptr, tmp);
309 for(; x < window_end_x; ++x, ++mapx_ptr, ++mapy_ptr, ++out_ptr)
311 *(out_ptr) = pixel_bilinear_c1_clamp(in_ptr, in_stride_val, width_val, height_val, mapx_ptr[0], mapy_ptr[0], _constant_border_value);
314 in, out, mapx, mapy);
324 (this->*_func)(window);
BorderMode
Methods available to handle borders.
Window calculate_max_window(const ValidRegion &valid_region, const Steps &steps, bool skip_border, BorderSize border_size)
InterpolationPolicy
Interpolation method.
const Window & window() const
The maximum window the kernel can be executed on.
NERemapKernel()
Default constructor.
virtual size_t dimension(size_t index) const =0
Return the size of the requested dimension.
#define ARM_COMPUTE_ERROR(msg)
Print the given message then throw an std::runtime_error.
1 channel, 1 U8 per channel
1 channel, 1 F32 per channel
Output values are defined by bilinear interpolation between the pixels.
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
Describe one of the image's dimensions with a start, end and step.
Output values are defined to match the source pixel whose center is nearest to the sample position.
Interface for CPU tensor.
Copyright (c) 2017-2021 Arm Limited.
static constexpr size_t DimX
Alias for dimension 0 also known as X dimension.
#define ARM_COMPUTE_UNUSED(...)
To avoid unused variables warnings.
void run(const Window &window, const ThreadInfo &info) override
Execute the kernel on the passed window.
Class to describe a number of elements in each dimension.
virtual ITensorInfo * info() const =0
Interface to be implemented by the child class to return the tensor's metadata.
constexpr uint8_t * ptr() const
Return a pointer to the current pixel.
void set(size_t dimension, const Dimension &dim)
Set the values of a given dimension.
#define ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(k)
#define ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(t, c,...)
static constexpr size_t DimY
Alias for dimension 1 also known as Y dimension.
ScaleKernelInfo info(interpolation_policy, default_border_mode, PixelValue(), sampling_policy, false)
Information about executing thread and CPU.
Borders are left undefined.
void configure(const ITensor *input, const ITensor *map_x, const ITensor *map_y, ITensor *output, InterpolationPolicy policy, BorderMode border_mode, uint8_t constant_border_value=0)
Initialize the kernel's input, output and border mode.
void execute_window_loop(const Window &w, L &&lambda_function, Ts &&... iterators)
Iterate through the passed window, automatically adjusting the iterators and calling the lambda_function for each element.
virtual const Strides & strides_in_bytes() const =0
The strides in bytes for accessing each dimension of the tensor.
im2col_func configure(src_target.info(), dst_target.info(), spatial_kernel, conv_info, has_bias)
constexpr int end() const
Return the end of the dimension.
Iterator updated by execute_window_loop for each window element.
constexpr int start() const
Return the start of the dimension.
Describe a multidimensional execution window.
#define ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(f, s)
constexpr const Dimension & x() const
Alias to access the first dimension of the window.