Compute Library
 21.11
NERemapKernel.cpp
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2017-2021 Arm Limited.
3  *
4  * SPDX-License-Identifier: MIT
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a copy
7  * of this software and associated documentation files (the "Software"), to
8  * deal in the Software without restriction, including without limitation the
9  * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10  * sell copies of the Software, and to permit persons to whom the Software is
11  * furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in all
14  * copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22  * SOFTWARE.
23  */
25 
26 #include "arm_compute/core/Error.h"
36 
37 #include <arm_neon.h>
38 #include <cstddef>
39 #include <cstdint>
40 
41 using namespace arm_compute::scale_helpers;
42 
43 namespace arm_compute
44 {
45 class Coordinates;
46 
47 namespace
48 {
/** Count how many of the next 4 remap coordinates fall outside the input plane.
 *
 * Loads 4 x and 4 y map coordinates, truncates them to integers and tests each
 * pair against the valid range [0, width_1] x [0, height_1] — the caller passes
 * the maximum valid coordinate per dimension, broadcast across a vector.
 *
 * @return Sum of per-lane flags, in [-4, 0]: each out-of-plane lane contributes
 *         -1 and each in-plane lane contributes 0 (-4 == all four out).
 */
inline int32_t num_out_of_tensor(const float *mapx_ptr, const float *mapy_ptr, const int32x4_t &width_1, const int32x4_t &height_1)
{
    // Truncate the floating-point map coordinates to integers (round toward zero).
    const int32x4_t mapx_s32 = vcvtq_s32_f32(vld1q_f32(mapx_ptr));
    const int32x4_t mapy_s32 = vcvtq_s32_f32(vld1q_f32(mapy_ptr));

    // min(limit - c, c) is negative exactly when c < 0 or c > limit; clamping the
    // result to [-1, 0] turns it into a per-lane -1/0 flag.
    const int32x4_t outbx_s32 = vminq_s32(vmaxq_s32(vminq_s32(vsubq_s32(width_1, mapx_s32), mapx_s32), vdupq_n_s32(-1)), vdupq_n_s32(0));  // Contains -1 if out of border in x, 0 otherwise
    const int32x4_t outby_s32 = vminq_s32(vmaxq_s32(vminq_s32(vsubq_s32(height_1, mapy_s32), mapy_s32), vdupq_n_s32(-1)), vdupq_n_s32(0)); // Contains -1 if out of border in y, 0 otherwise

    // A lane is out of the plane if it is out of range in either dimension.
    const int32x4_t out_of_tensor_v = vminq_s32(outbx_s32, outby_s32);
#if defined(__aarch64__)
    // only AArch64 supports vaddv
    return vaddvq_s32(out_of_tensor_v);
#else  // __aarch64__
    return vgetq_lane_s32(out_of_tensor_v, 0) + vgetq_lane_s32(out_of_tensor_v, 1) + vgetq_lane_s32(out_of_tensor_v, 2) + vgetq_lane_s32(out_of_tensor_v, 3);
#endif // __aarch64__
}
65 
/** Remap a single U8 pixel with nearest-neighbour interpolation.
 *
 * The source coordinate is read from @p mapx_ptr / @p mapy_ptr and truncated to
 * integers; pixels mapping outside the input plane receive the constant border
 * value instead.
 *
 * @param[in]  in_ptr                Base pointer of the input plane.
 * @param[in]  mapx_ptr              Pointer to the x source coordinate.
 * @param[in]  mapy_ptr              Pointer to the y source coordinate.
 * @param[out] out_ptr               Destination pixel.
 * @param[in]  width_val             Input plane width in elements.
 * @param[in]  height_val            Input plane height in elements.
 * @param[in]  in_stride_val         Input row stride.
 * @param[in]  constant_border_value Value written for out-of-plane coordinates.
 */
inline void serial_remap_nearest_interpolation(const uint8_t *in_ptr, const float *mapx_ptr, const float *mapy_ptr, uint8_t *out_ptr,
                                               int32_t width_val, int32_t height_val, int32_t in_stride_val, uint8_t constant_border_value)
{
    // Truncation toward zero implements the nearest policy used by this kernel.
    const int32_t src_x = static_cast<int32_t>(*mapx_ptr);
    const int32_t src_y = static_cast<int32_t>(*mapy_ptr);

    const bool inside = (src_x >= 0) && (src_x < width_val) && (src_y >= 0) && (src_y < height_val);
    *out_ptr = inside ? in_ptr[src_y * in_stride_val + src_x] : constant_border_value;
}
80 
/** Compute 4 linear input offsets (x + y * stride) for nearest-neighbour gathering.
 *
 * The 4 map coordinates are loaded and truncated to integers. Callers must have
 * verified the coordinates are inside the plane (see num_out_of_tensor), since
 * the offsets are used to index the input directly.
 */
inline int32x4_t offset_nearest_interpolation(const float *mapx_ptr, const float *mapy_ptr, const int32x4_t &stride)
{
    const int32x4_t mapx_s32 = vcvtq_s32_f32(vld1q_f32(mapx_ptr));
    const int32x4_t mapy_s32 = vcvtq_s32_f32(vld1q_f32(mapy_ptr));
    // offset = mapx + mapy * stride
    return vmlaq_s32(mapx_s32, mapy_s32, stride);
}
87 
/** Sample one U8 pixel with bilinear interpolation, clamping the coordinate and
 *  substituting the constant border value for neighbours outside the plane.
 *
 * @param[in] pixel_ptr             Base pointer of the input plane.
 * @param[in] stride                Input row stride.
 * @param[in] width                 Plane width in elements.
 * @param[in] height                Plane height in elements.
 * @param[in] x                     Horizontal sampling coordinate.
 * @param[in] y                     Vertical sampling coordinate.
 * @param[in] constant_border_value Value used for out-of-plane neighbours.
 *
 * @return The interpolated value, truncated to U8.
 */
inline uint8_t pixel_bilinear_c1_clamp(const uint8_t *pixel_ptr, int32_t stride, int32_t width, int32_t height, float x, float y, uint8_t constant_border_value)
{
    // Clamp the sample point to [-1, width] x [-1, height]: at most one pixel
    // outside the plane, so neighbour indices below are at worst -1.
    x = std::max(-1.f, std::min(x, static_cast<float>(width)));
    y = std::max(-1.f, std::min(y, static_cast<float>(height)));

    // Integer top-left neighbour and fractional distances to it.
    const auto  x0 = static_cast<int32_t>(std::floor(x));
    const auto  y0 = static_cast<int32_t>(std::floor(y));
    const float fx = x - static_cast<float>(x0);
    const float fy = y - static_cast<float>(y0);

    // Validity of each row/column of the 2x2 neighbourhood. Thanks to the
    // clamping above x0 + 1 and y0 + 1 can never be negative, so only the
    // upper bound needs checking on the "+1" side.
    const bool col0_in = (x0 >= 0) && (x0 < width);
    const bool col1_in = (x0 + 1 < width);
    const bool row0_in = (y0 >= 0) && (y0 < height);
    const bool row1_in = (y0 + 1 < height);

    // Fetch each neighbour, falling back to the border value when invalid;
    // no out-of-plane address is ever dereferenced.
    const uint8_t p00 = (col0_in && row0_in) ? pixel_ptr[x0 + y0 * stride] : constant_border_value;
    const uint8_t p01 = (col1_in && row0_in) ? pixel_ptr[x0 + 1 + y0 * stride] : constant_border_value;
    const uint8_t p10 = (col0_in && row1_in) ? pixel_ptr[x0 + y0 * stride + stride] : constant_border_value;
    const uint8_t p11 = (col1_in && row1_in) ? pixel_ptr[x0 + 1 + y0 * stride + stride] : constant_border_value;

    // Standard bilinear blend of the four neighbours.
    const float wx0 = 1.0f - fx;
    const float wy0 = 1.0f - fy;
    return static_cast<uint8_t>(p00 * (wx0 * wy0) + p01 * (fx * wy0) + p10 * (wx0 * fy) + p11 * (fx * fy));
}
115 } // namespace
116 
118  : _func(nullptr), _input(nullptr), _output(nullptr), _map_x(nullptr), _map_y(nullptr), _border_mode(BorderMode::UNDEFINED), _constant_border_value(0)
119 {
120 }
121 
/** Store the tensor pointers and border configuration and select the worker
 *  function for the requested interpolation policy.
 *
 * NOTE(review): several original lines (input validation macros, the switch
 * case labels NEAREST_NEIGHBOR / BILINEAR, and the final INEKernel window
 * configuration call) were elided by the documentation generator this listing
 * came from — confirm against the full source before editing this function.
 */
void NERemapKernel::configure(const ITensor *input, const ITensor *map_x, const ITensor *map_y, ITensor *output, InterpolationPolicy policy, BorderMode border_mode, uint8_t constant_border_value)
{

    // Cache the tensors and border handling parameters for run().
    _input                 = input;
    _output                = output;
    _map_x                 = map_x;
    _map_y                 = map_y;
    _border_mode           = border_mode;
    _constant_border_value = constant_border_value;

    switch(policy)
    {
        // NOTE(review): case label for the nearest-neighbour policy elided here.
        {
            _func = &NERemapKernel::remap_nearest;
            break;
        }
        // NOTE(review): case label for the bilinear policy elided here.
        {
            _func = &NERemapKernel::remap_bilinear;
            break;
        }
        default:
            ARM_COMPUTE_ERROR("Unsupported interpolation mode");
            break;
    }

    // Configure kernel window
    Window win = calculate_max_window(*output->info(), Steps());
    // NOTE(review): the call configuring the kernel with `win` was elided in this listing.
}
157 
/** Remap the window with nearest-neighbour interpolation.
 *
 * Groups of 8 output pixels whose source coordinates are all inside the input
 * plane are gathered with NEON via precomputed linear offsets; groups with any
 * out-of-plane coordinate — and the scalar tail — fall back to
 * serial_remap_nearest_interpolation(), which writes the constant border value
 * for out-of-plane pixels.
 */
void NERemapKernel::remap_nearest(const Window &window)
{
    // Don't increment in X and Y direction for the input tensor
    // A pointer to the start of this plane is needed as base for the precomputed offsets
    Window win_in(window);
    win_in.set(Window::DimX, Window::Dimension(0, 0, 0));
    win_in.set(Window::DimY, Window::Dimension(0, 0, 0));

    const auto    window_start_x = static_cast<int32_t>(window.x().start());
    const auto    window_end_x   = static_cast<int32_t>(window.x().end());
    const int32_t window_step_x  = 8;

    // Don't increment in X direction for the output, mapx, mapy tensors
    Window win(window);
    win.set(Window::DimX, Window::Dimension(0, 1, 1));

    Iterator in(_input, win_in);
    Iterator out(_output, win);
    Iterator mapx(_map_x, win);
    Iterator mapy(_map_y, win);

    // Input plane extents and row stride; since the input is addressed through a
    // uint8_t pointer, the byte stride is used directly as an element offset.
    const int32_t width_val     = static_cast<int32_t>(_input->info()->dimension(0));
    const int32_t height_val    = static_cast<int32_t>(_input->info()->dimension(1));
    const int32_t in_stride_val = static_cast<int32_t>(_input->info()->strides_in_bytes()[1]);
    // Maximum valid coordinate per dimension, broadcast for the vector bounds test.
    const int32x4_t width_1   = vdupq_n_s32(width_val - 1);
    const int32x4_t height_1  = vdupq_n_s32(height_val - 1);
    const int32x4_t in_stride = vdupq_n_s32(in_stride_val);

    execute_window_loop(win, [&](const Coordinates &)
    {
        auto           mapx_ptr = reinterpret_cast<const float *>(mapx.ptr());
        auto           mapy_ptr = reinterpret_cast<const float *>(mapy.ptr());
        const uint8_t *in_ptr   = in.ptr();
        uint8_t       *out_ptr  = out.ptr();
        int32_t        x        = window_start_x;
        for(; x < window_end_x - window_step_x; x += window_step_x, mapx_ptr += window_step_x, mapy_ptr += window_step_x, out_ptr += window_step_x)
        {
            // Classify the 8 source coordinates: each out-of-plane lane adds -1.
            const int32_t out_of_tensor0 = num_out_of_tensor(mapx_ptr, mapy_ptr + 0, width_1, height_1);
            const int32_t out_of_tensor1 = num_out_of_tensor(mapx_ptr + 4, mapy_ptr + 4, width_1, height_1);
            const int32_t out_of_tensor  = out_of_tensor0 + out_of_tensor1;

            if(out_of_tensor == -8)
            {
                // All elements are out of xy plane
                uint8x8_t tmp = vdup_n_u8(_constant_border_value);
                vst1_u8(out_ptr, tmp);
            }
            else if(out_of_tensor < 0)
            {
                // Some elements are out of xy plane
                serial_remap_nearest_interpolation(in_ptr, mapx_ptr, mapy_ptr, out_ptr, width_val, height_val, in_stride_val, _constant_border_value);
                serial_remap_nearest_interpolation(in_ptr, mapx_ptr + 1, mapy_ptr + 1, out_ptr + 1, width_val, height_val, in_stride_val, _constant_border_value);
                serial_remap_nearest_interpolation(in_ptr, mapx_ptr + 2, mapy_ptr + 2, out_ptr + 2, width_val, height_val, in_stride_val, _constant_border_value);
                serial_remap_nearest_interpolation(in_ptr, mapx_ptr + 3, mapy_ptr + 3, out_ptr + 3, width_val, height_val, in_stride_val, _constant_border_value);
                serial_remap_nearest_interpolation(in_ptr, mapx_ptr + 4, mapy_ptr + 4, out_ptr + 4, width_val, height_val, in_stride_val, _constant_border_value);
                serial_remap_nearest_interpolation(in_ptr, mapx_ptr + 5, mapy_ptr + 5, out_ptr + 5, width_val, height_val, in_stride_val, _constant_border_value);
                serial_remap_nearest_interpolation(in_ptr, mapx_ptr + 6, mapy_ptr + 6, out_ptr + 6, width_val, height_val, in_stride_val, _constant_border_value);
                serial_remap_nearest_interpolation(in_ptr, mapx_ptr + 7, mapy_ptr + 7, out_ptr + 7, width_val, height_val, in_stride_val, _constant_border_value);
            }
            else
            {
                // All elements are in xy plane
                // Gather the 8 input pixels through the precomputed linear offsets.
                uint8x8_t       tmp     = vdup_n_u8(0);
                const int32x4_t offset0 = offset_nearest_interpolation(mapx_ptr, mapy_ptr, in_stride);
                const int32x4_t offset1 = offset_nearest_interpolation(mapx_ptr + 4, mapy_ptr + 4, in_stride);
                tmp = vset_lane_u8(in_ptr[vgetq_lane_s32(offset0, 0)], tmp, 0);
                tmp = vset_lane_u8(in_ptr[vgetq_lane_s32(offset0, 1)], tmp, 1);
                tmp = vset_lane_u8(in_ptr[vgetq_lane_s32(offset0, 2)], tmp, 2);
                tmp = vset_lane_u8(in_ptr[vgetq_lane_s32(offset0, 3)], tmp, 3);
                tmp = vset_lane_u8(in_ptr[vgetq_lane_s32(offset1, 0)], tmp, 4);
                tmp = vset_lane_u8(in_ptr[vgetq_lane_s32(offset1, 1)], tmp, 5);
                tmp = vset_lane_u8(in_ptr[vgetq_lane_s32(offset1, 2)], tmp, 6);
                tmp = vset_lane_u8(in_ptr[vgetq_lane_s32(offset1, 3)], tmp, 7);
                vst1_u8(out_ptr, tmp);
            }
        }
        // Scalar tail; because the vector loop uses a strict '<' bound this also
        // handles the final full group when the row length is a multiple of 8.
        for(; x < window_end_x; ++x, ++mapx_ptr, ++mapy_ptr, ++out_ptr)
        {
            serial_remap_nearest_interpolation(in_ptr, mapx_ptr, mapy_ptr, out_ptr, width_val, height_val, in_stride_val, _constant_border_value);
        }
    },
    in, out, mapx, mapy);
}
241 
/** Remap the window with bilinear interpolation.
 *
 * Groups of 8 output pixels are interpolated per lane with
 * pixel_bilinear_c1_clamp() and stored with one vector write when every lane's
 * 2x2 neighbourhood is fully inside the input plane; groups containing any
 * out-of-plane coordinate — and the scalar tail — are written pixel by pixel,
 * with the constant border value substituted for out-of-plane neighbours.
 */
void NERemapKernel::remap_bilinear(const Window &window)
{
    // Don't increment in X and Y direction for the input tensor
    // A pointer to the start of this plane is needed as base for the precomputed offsets
    Window win_in(window);
    win_in.set(Window::DimX, Window::Dimension(0, 0, 0));
    win_in.set(Window::DimY, Window::Dimension(0, 0, 0));

    const auto    window_start_x = static_cast<int32_t>(window.x().start());
    const auto    window_end_x   = static_cast<int32_t>(window.x().end());
    const int32_t window_step_x  = 8;

    // Don't increment in X direction for the output, mapx, mapy tensors
    Window win(window);
    win.set(Window::DimX, Window::Dimension(0, 1, 1));

    Iterator in(_input, win_in);
    Iterator out(_output, win);
    Iterator mapx(_map_x, win);
    Iterator mapy(_map_y, win);

    const int32_t width_val  = static_cast<int32_t>(_input->info()->dimension(0));
    const int32_t height_val = static_cast<int32_t>(_input->info()->dimension(1));
    // Bilinear also reads the (x + 1, y + 1) neighbours, so the vector bounds
    // test uses width - 2 / height - 2 as the maximum "fully inside" coordinate.
    const int32x4_t width_2       = vdupq_n_s32(width_val - 2);
    const int32x4_t height_2      = vdupq_n_s32(height_val - 2);
    const int32_t   in_stride_val = static_cast<int32_t>(_input->info()->strides_in_bytes()[1]);

    execute_window_loop(win, [&](const Coordinates &)
    {
        auto           mapx_ptr = reinterpret_cast<const float *>(mapx.ptr());
        auto           mapy_ptr = reinterpret_cast<const float *>(mapy.ptr());
        const uint8_t *in_ptr   = in.ptr();
        uint8_t       *out_ptr  = out.ptr();
        int32_t        x        = window_start_x;
        for(; x < window_end_x - window_step_x; x += window_step_x, mapx_ptr += window_step_x, mapy_ptr += window_step_x, out_ptr += window_step_x)
        {
            // Each lane whose 2x2 neighbourhood is not fully inside adds -1.
            const int32_t out_of_tensor0 = num_out_of_tensor(mapx_ptr, mapy_ptr + 0, width_2, height_2);
            const int32_t out_of_tensor1 = num_out_of_tensor(mapx_ptr + 4, mapy_ptr + 4, width_2, height_2);
            const int32_t out_of_tensor  = out_of_tensor0 + out_of_tensor1;

            if(out_of_tensor < 0)
            {
                // Elements are out of xy plane
                *(out_ptr)     = pixel_bilinear_c1_clamp(in_ptr, in_stride_val, width_val, height_val, mapx_ptr[0], mapy_ptr[0], _constant_border_value);
                *(out_ptr + 1) = pixel_bilinear_c1_clamp(in_ptr, in_stride_val, width_val, height_val, mapx_ptr[1], mapy_ptr[1], _constant_border_value);
                *(out_ptr + 2) = pixel_bilinear_c1_clamp(in_ptr, in_stride_val, width_val, height_val, mapx_ptr[2], mapy_ptr[2], _constant_border_value);
                *(out_ptr + 3) = pixel_bilinear_c1_clamp(in_ptr, in_stride_val, width_val, height_val, mapx_ptr[3], mapy_ptr[3], _constant_border_value);
                *(out_ptr + 4) = pixel_bilinear_c1_clamp(in_ptr, in_stride_val, width_val, height_val, mapx_ptr[4], mapy_ptr[4], _constant_border_value);
                *(out_ptr + 5) = pixel_bilinear_c1_clamp(in_ptr, in_stride_val, width_val, height_val, mapx_ptr[5], mapy_ptr[5], _constant_border_value);
                *(out_ptr + 6) = pixel_bilinear_c1_clamp(in_ptr, in_stride_val, width_val, height_val, mapx_ptr[6], mapy_ptr[6], _constant_border_value);
                *(out_ptr + 7) = pixel_bilinear_c1_clamp(in_ptr, in_stride_val, width_val, height_val, mapx_ptr[7], mapy_ptr[7], _constant_border_value);
            }
            else
            {
                // All elements are in xy plane
                uint8x8_t tmp = vdup_n_u8(0);
                tmp = vset_lane_u8(pixel_bilinear_c1_clamp(in_ptr, in_stride_val, width_val, height_val, mapx_ptr[0], mapy_ptr[0], _constant_border_value), tmp, 0);
                tmp = vset_lane_u8(pixel_bilinear_c1_clamp(in_ptr, in_stride_val, width_val, height_val, mapx_ptr[1], mapy_ptr[1], _constant_border_value), tmp, 1);
                tmp = vset_lane_u8(pixel_bilinear_c1_clamp(in_ptr, in_stride_val, width_val, height_val, mapx_ptr[2], mapy_ptr[2], _constant_border_value), tmp, 2);
                tmp = vset_lane_u8(pixel_bilinear_c1_clamp(in_ptr, in_stride_val, width_val, height_val, mapx_ptr[3], mapy_ptr[3], _constant_border_value), tmp, 3);
                tmp = vset_lane_u8(pixel_bilinear_c1_clamp(in_ptr, in_stride_val, width_val, height_val, mapx_ptr[4], mapy_ptr[4], _constant_border_value), tmp, 4);
                tmp = vset_lane_u8(pixel_bilinear_c1_clamp(in_ptr, in_stride_val, width_val, height_val, mapx_ptr[5], mapy_ptr[5], _constant_border_value), tmp, 5);
                tmp = vset_lane_u8(pixel_bilinear_c1_clamp(in_ptr, in_stride_val, width_val, height_val, mapx_ptr[6], mapy_ptr[6], _constant_border_value), tmp, 6);
                tmp = vset_lane_u8(pixel_bilinear_c1_clamp(in_ptr, in_stride_val, width_val, height_val, mapx_ptr[7], mapy_ptr[7], _constant_border_value), tmp, 7);
                vst1_u8(out_ptr, tmp);
            }
        }
        // Scalar tail; because the vector loop uses a strict '<' bound this also
        // handles the final full group when the row length is a multiple of 8.
        for(; x < window_end_x; ++x, ++mapx_ptr, ++mapy_ptr, ++out_ptr)
        {
            *(out_ptr) = pixel_bilinear_c1_clamp(in_ptr, in_stride_val, width_val, height_val, mapx_ptr[0], mapy_ptr[0], _constant_border_value);
        }
    },
    in, out, mapx, mapy);
}
316 
/** Execute the kernel on the given window by dispatching to the worker selected
 *  in configure().
 *
 * NOTE(review): kernel/window validation macros adjacent to these lines were
 * elided by the documentation generator this listing came from.
 */
void NERemapKernel::run(const Window &window, const ThreadInfo &info)
{
    ARM_COMPUTE_UNUSED(info);
    // configure() must have selected an interpolation worker before run().
    ARM_COMPUTE_ERROR_ON(_func == nullptr);

    (this->*_func)(window);
}
326 } // namespace arm_compute
BorderMode
Methods available to handle borders.
Definition: Types.h:261
Window calculate_max_window(const ValidRegion &valid_region, const Steps &steps, bool skip_border, BorderSize border_size)
InterpolationPolicy
Interpolation method.
Definition: Types.h:398
const Window & window() const
The maximum window the kernel can be executed on.
Definition: IKernel.cpp:28
NERemapKernel()
Default constructor.
virtual size_t dimension(size_t index) const =0
Return the size of the requested dimension.
#define ARM_COMPUTE_ERROR(msg)
Print the given message then throw an std::runtime_error.
Definition: Error.h:352
1 channel, 1 U8 per channel
1 channel, 1 F32 per channel
Output values are defined by bilinear interpolation between the pixels.
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
Definition: Error.h:466
Describe one of the image's dimensions with a start, end and step.
Definition: Window.h:77
Output values are defined to match the source pixel whose center is nearest to the sample position...
Interface for CPU tensor.
Definition: ITensor.h:36
Copyright (c) 2017-2021 Arm Limited.
static constexpr size_t DimX
Alias for dimension 0 also known as X dimension.
Definition: Window.h:43
#define ARM_COMPUTE_UNUSED(...)
To avoid unused variables warnings.
Definition: Error.h:152
void run(const Window &window, const ThreadInfo &info) override
Execute the kernel on the passed window.
Class to describe a number of elements in each dimension.
Definition: Steps.h:40
Coordinates of an item.
Definition: Coordinates.h:37
virtual ITensorInfo * info() const =0
Interface to be implemented by the child class to return the tensor's metadata.
constexpr uint8_t * ptr() const
Return a pointer to the current pixel.
Definition: Helpers.inl:139
void set(size_t dimension, const Dimension &dim)
Set the values of a given dimension.
Definition: Window.inl:49
#define ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(k)
Definition: Validate.h:915
#define ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(t, c,...)
Definition: Validate.h:786
static constexpr size_t DimY
Alias for dimension 1 also known as Y dimension.
Definition: Window.h:45
ScaleKernelInfo info(interpolation_policy, default_border_mode, PixelValue(), sampling_policy, false)
Information about executing thread and CPU.
Definition: CPPTypes.h:158
Borders are left undefined.
void configure(const ITensor *input, const ITensor *map_x, const ITensor *map_y, ITensor *output, InterpolationPolicy policy, BorderMode border_mode, uint8_t constant_border_value=0)
Initialize the kernel's input, output and border mode.
void execute_window_loop(const Window &w, L &&lambda_function, Ts &&... iterators)
Iterate through the passed window, automatically adjusting the iterators and calling the lambda_funct...
Definition: Helpers.inl:77
virtual const Strides & strides_in_bytes() const =0
The strides in bytes for accessing each dimension of the tensor.
im2col_func configure(src_target.info(), dst_target.info(), spatial_kernel, conv_info, has_bias)
constexpr int end() const
Return the end of the dimension.
Definition: Window.h:99
Iterator updated by execute_window_loop for each window element.
Definition: Helpers.h:46
constexpr int start() const
Return the start of the dimension.
Definition: Window.h:94
Describe a multidimensional execution window.
Definition: Window.h:39
#define ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(f, s)
Definition: Validate.h:201
constexpr const Dimension & x() const
Alias to access the first dimension of the window.
Definition: Window.h:145