Compute Library
 21.11
NECropKernel.cpp
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2019-2021 Arm Limited.
3  *
4  * SPDX-License-Identifier: MIT
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a copy
7  * of this software and associated documentation files (the "Software"), to
8  * deal in the Software without restriction, including without limitation the
9  * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10  * sell copies of the Software, and to permit persons to whom the Software is
11  * furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in all
14  * copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22  * SOFTWARE.
23  */
25 
28 #include "arm_compute/core/Types.h"
32 #include "src/core/CPP/Validate.h"
37 
38 namespace arm_compute
39 {
40 namespace
41 {
/** Fallback overload: load elements of type @p T as a float32x4_t.
 *
 * Only the explicit specialisations below are valid; reaching this generic
 * version at runtime is a programming error and aborts via ARM_COMPUTE_ERROR.
 */
template <typename T>
inline float32x4_t load_as_f32(T *ptr)
{
    ARM_COMPUTE_UNUSED(ptr);
    ARM_COMPUTE_ERROR("Type not supported.");
}
48 
49 template <>
50 inline float32x4_t load_as_f32(float *ptr)
51 {
52  return wrapper::vloadq(ptr);
53 }
54 
55 template <>
56 inline float32x4_t load_as_f32(int32_t *ptr)
57 {
58  return vcvtq_f32_s32(wrapper::vloadq(ptr));
59 }
60 
61 template <>
62 inline float32x4_t load_as_f32(uint32_t *ptr)
63 {
64  return vcvtq_f32_u32(wrapper::vloadq(ptr));
65 }
66 
67 template <>
68 inline float32x4_t load_as_f32(int16_t *ptr)
69 {
70  return vcvtq_f32_s32(vmovl_s16(wrapper::vload(ptr)));
71 }
72 
73 template <>
74 inline float32x4_t load_as_f32(uint16_t *ptr)
75 {
76  return vcvtq_f32_u32(vmovl_u16(wrapper::vload(ptr)));
77 }
78 
79 template <>
80 inline float32x4_t load_as_f32(uint8_t *ptr)
81 {
82  return vcvtq_f32_u32(vmovl_u16(vget_low_u16(vmovl_u8(wrapper::vload(ptr)))));
83 }
84 
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
/** Widen four half-precision floats to single precision. */
template <>
inline float32x4_t load_as_f32(float16_t *ptr)
{
    const auto halves = wrapper::vload(ptr);
    return vcvt_f32_f16(halves);
}
#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
92 
93 template <typename T>
94 inline void in_bounds_crop_window(const ITensor *input, const ITensor *output, float *output_ptr, Coordinates input_offset,
95  int32_t window_step_x, int32_t output_width_start, int32_t output_width_limit, bool input_has_single_channel, bool is_width_flipped)
96 {
97  // Reverse elements if width flipped.
98  if(is_width_flipped)
99  {
100  // Collapse first dimension if possible.
101  if(input_has_single_channel)
102  {
103  int32_t x = output_width_start;
104  Coordinates negative_offset(input_offset);
105  negative_offset.set(1, negative_offset[1] - window_step_x + 1);
106  for(; x <= output_width_limit - window_step_x; x += window_step_x, negative_offset[1] -= window_step_x)
107  {
108  auto in = load_as_f32(reinterpret_cast<T *>(input->ptr_to_element(negative_offset)));
109 
110  in = wrapper::vrev64(in);
112 
113  wrapper::vstore(output_ptr + x, in);
114  }
115  input_offset[1] = negative_offset[1] + window_step_x - 1;
116  for(; x < output_width_limit; ++x, --input_offset[1])
117  {
118  *(output_ptr + x) = static_cast<float>(*reinterpret_cast<T *>(input->ptr_to_element(input_offset)));
119  }
120  }
121  else
122  {
123  for(int32_t x = output_width_start; x < output_width_limit; ++x, --input_offset[1])
124  {
125  input_offset.set(0, 0);
126  int32_t c = 0;
127  for(; c <= static_cast<int32_t>(input->info()->dimension(0)) - window_step_x; c += window_step_x, input_offset[0] += window_step_x)
128  {
129  auto in = load_as_f32(reinterpret_cast<T *>(input->ptr_to_element(input_offset)));
130  wrapper::vstore(output_ptr + x * output->info()->dimension(0) + c, in);
131  }
132  for(; c < static_cast<int32_t>(input->info()->dimension(0)); ++c, ++input_offset[0])
133  {
134  *(output_ptr + x * output->info()->dimension(0) + c) = static_cast<float>(*reinterpret_cast<T *>(input->ptr_to_element(input_offset)));
135  }
136  }
137  }
138  }
139  else
140  {
141  // Use memcpy if the elements don't need converting to float.
142  if(std::is_same<T, float>::value)
143  {
144  memcpy(static_cast<void *>(output_ptr + output_width_start * output->info()->dimension(0)),
145  reinterpret_cast<const void *>(input->ptr_to_element(input_offset)),
146  (output_width_limit - output_width_start) * output->info()->dimension(0) * output->info()->element_size());
147  }
148  else
149  {
150  int32_t x = 0;
151  int32_t limit = (output_width_limit - output_width_start) * static_cast<int32_t>(output->info()->dimension(0));
152  float *output_start_ptr = output_ptr + output_width_start * output->info()->dimension(0);
153  for(; x <= limit - window_step_x; x += window_step_x, input_offset[0] += window_step_x)
154  {
155  auto in = load_as_f32(reinterpret_cast<T *>(input->ptr_to_element(input_offset)));
156  wrapper::vstore(output_start_ptr + x, in);
157  }
158  for(; x < limit; ++x, ++input_offset[0])
159  {
160  *(output_start_ptr + x) = static_cast<float>(*reinterpret_cast<T *>(input->ptr_to_element(input_offset)));
161  }
162  }
163  }
164 }
165 
166 inline void out_of_bounds_crop_window(const ITensor *output, float *output_ptr, float extrapolation_value,
167  int32_t window_step_x, int32_t output_width_start, int32_t output_width_limit)
168 {
169  auto in = wrapper::vdup_n(extrapolation_value, wrapper::traits::vector_128_tag());
170  int32_t x = 0;
171  int32_t limit = (output_width_limit - output_width_start) * static_cast<int32_t>(output->info()->dimension(0));
172  float *output_start_ptr = output_ptr + output_width_start * output->info()->dimension(0);
173  for(; x <= limit - window_step_x; x += window_step_x)
174  {
175  wrapper::vstore(output_start_ptr + x, in);
176  }
177  for(; x < limit; ++x)
178  {
179  *(output_start_ptr + x) = extrapolation_value;
180  }
181 }
182 
/** Produce the entire output for one crop box: copy in-bounds elements from the
 *  input and fill everything else with @p extrapolation_value.
 *
 * Output layout (rows = dimension 2, columns = dimension 1, channels = dimension 0):
 *
 *   +------------------------------+
 *   |   out-of-bounds rows before  |
 *   +---------+----------+---------+
 *   | out-of- | in-bound | out-of- |
 *   | bounds  | elements | bounds  |
 *   | cols    | copied   | cols    |
 *   | before  | from the | after   |
 *   |         | input    |         |
 *   +---------+----------+---------+
 *   |   out-of-bounds rows after   |
 *   +------------------------------+
 */
inline void execute_window(const ITensor *input, const ITensor *output, Coordinates input_offset, float extrapolation_value,
                           const std::array<uint32_t, 2> &rows_out_of_bounds, const std::array<uint32_t, 2> &cols_out_of_bounds, NECropKernel::InBoundsCropFunction *in_bounds_crop_function,
                           bool is_height_flipped, bool has_cols_in_bounds, bool has_cols_out_of_bounds_before, bool has_cols_out_of_bounds_after, bool input_has_single_channel, bool is_width_flipped)
{
    // Output is always float.
    const int window_step_x = 16 / sizeof(float);
    auto *output_ptr        = reinterpret_cast<float *>(output->buffer());
    // Fill all output rows that have no elements within the input bounds with the
    // extrapolation value. First the rows before the in-bounds rows.
    out_of_bounds_crop_window(output, output_ptr, extrapolation_value, window_step_x, 0, rows_out_of_bounds[0] * output->info()->dimension(1));
    // Advance past the rows just filled (row stride = cols * channels).
    output_ptr += rows_out_of_bounds[0] * output->info()->dimension(1) * output->info()->dimension(0);
    // Iterate through each row that has any elements within the input bounds;
    // the input row index moves backwards when the crop box flips the height axis.
    for(uint32_t row = rows_out_of_bounds[0]; static_cast<int32_t>(row) < static_cast<int32_t>(output->info()->dimension(2) - rows_out_of_bounds[1]);
        ++row, is_height_flipped ? --input_offset[2] : ++input_offset[2])
    {
        // Fill the out-of-bounds elements at the start of the row.
        if(has_cols_out_of_bounds_before)
        {
            out_of_bounds_crop_window(output, output_ptr, extrapolation_value, window_step_x, 0, cols_out_of_bounds[0]);
        }
        // Copy all elements within the input bounds from the input tensor.
        if(has_cols_in_bounds)
        {
            (*in_bounds_crop_function)(input, output, output_ptr, input_offset, window_step_x, cols_out_of_bounds[0],
                                       output->info()->dimension(1) - cols_out_of_bounds[1], input_has_single_channel, is_width_flipped);
        }
        // Fill the out-of-bounds elements at the end of the row.
        if(has_cols_out_of_bounds_after)
        {
            out_of_bounds_crop_window(output, output_ptr, extrapolation_value, window_step_x, output->info()->dimension(1) - cols_out_of_bounds[1], output->info()->dimension(1));
        }
        // Advance to the next output row.
        output_ptr += output->info()->dimension(1) * output->info()->dimension(0);
    }
    // Finally fill the out-of-bounds rows after the in-bounds rows.
    out_of_bounds_crop_window(output, output_ptr, extrapolation_value, window_step_x, 0, rows_out_of_bounds[1] * output->info()->dimension(1));
}
234 } // namespace
235 
237  : _input(nullptr), _crop_boxes(nullptr), _box_ind(nullptr), _output(nullptr), _start(), _end(), _crop_box_ind(0), _extrapolation_value(0), _rows_out_of_bounds(), _cols_out_of_bounds(),
238  _in_bounds_crop_function(nullptr)
239 {
240 }
241 
242 void NECropKernel::configure(const ITensor *input, const ITensor *crop_boxes, const ITensor *box_ind, ITensor *output, uint32_t crop_box_ind, float extrapolation_value)
243 {
244  ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
245  ARM_COMPUTE_ERROR_THROW_ON(validate(input->info(), crop_boxes->info(), box_ind->info(), output->info(), crop_box_ind, extrapolation_value));
246 
247  _input = input;
248  _crop_boxes = crop_boxes;
249  _box_ind = box_ind;
250  _output = output;
251  _crop_box_ind = crop_box_ind;
252  _extrapolation_value = extrapolation_value;
253 
254  switch(input->info()->data_type())
255  {
256  case DataType::F32:
257  _in_bounds_crop_function = &in_bounds_crop_window<float>;
258  break;
259 #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
260  case DataType::F16:
261  _in_bounds_crop_function = &in_bounds_crop_window<float16_t>;
262  break;
263 #endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
264  case DataType::U32:
265  _in_bounds_crop_function = &in_bounds_crop_window<uint32_t>;
266  break;
267  case DataType::S32:
268  _in_bounds_crop_function = &in_bounds_crop_window<int32_t>;
269  break;
270  case DataType::U16:
271  _in_bounds_crop_function = &in_bounds_crop_window<uint16_t>;
272  break;
273  case DataType::S16:
274  _in_bounds_crop_function = &in_bounds_crop_window<int16_t>;
275  break;
276  case DataType::U8:
277  _in_bounds_crop_function = &in_bounds_crop_window<uint8_t>;
278  break;
279  default:
280  ARM_COMPUTE_ERROR("Datatype not supported");
281  }
282 }
283 
/** Check whether the given tensor infos describe a valid crop configuration.
 *
 * Verifies that the crop-box tensor holds 4 coordinates per box, that the box
 * index tensor has one entry per box, and that @p crop_box_ind addresses an
 * existing box.
 *
 * NOTE(review): several ARM_COMPUTE_RETURN_ERROR_ON_* checks (data type,
 * data layout, CPU F16-support and similar macros) appear to be elided in
 * this rendering of the file — confirm against the complete source.
 *
 * @return A Status: an error if any check fails, otherwise Status{}.
 */
Status NECropKernel::validate(const ITensorInfo *input, const ITensorInfo *crop_boxes, const ITensorInfo *box_ind, const ITensorInfo *output, uint32_t crop_box_ind, float extrapolation_value)
{
    ARM_COMPUTE_UNUSED(extrapolation_value);
    // Each crop box is four normalized coordinates [y0, x0, y1, x1].
    ARM_COMPUTE_RETURN_ERROR_ON(crop_boxes->tensor_shape()[0] != 4);
    // One batch index per crop box.
    ARM_COMPUTE_RETURN_ERROR_ON(crop_boxes->tensor_shape()[1] != box_ind->tensor_shape()[0]);
    // The requested box must exist in both tensors.
    ARM_COMPUTE_RETURN_ERROR_ON(crop_boxes->tensor_shape()[1] <= crop_box_ind);
    ARM_COMPUTE_RETURN_ERROR_ON(box_ind->tensor_shape()[0] <= crop_box_ind);
    if(output->total_size() > 0)
    {
        // NOTE(review): output-tensor checks elided in this rendering — confirm
        // against the complete source.
    }
    return Status{};
}
304 
306 {
307  // _crop_box_ind is used to index _crop_boxes and retrieve the appropriate crop box.
308  // The crop box is specified by normalized coordinates [y0, x0, y1, x1].
309  const float x0 = *reinterpret_cast<const float *>(_crop_boxes->ptr_to_element(Coordinates(1, _crop_box_ind)));
310  const float y0 = *reinterpret_cast<const float *>(_crop_boxes->ptr_to_element(Coordinates(0, _crop_box_ind)));
311  const float x1 = *reinterpret_cast<const float *>(_crop_boxes->ptr_to_element(Coordinates(3, _crop_box_ind)));
312  const float y1 = *reinterpret_cast<const float *>(_crop_boxes->ptr_to_element(Coordinates(2, _crop_box_ind)));
313  // The normalized coordiantes are scaled to retrieve the floating point image coordinates which are rounded to integers.
314  _start = Coordinates(std::floor(x0 * (_input->info()->tensor_shape()[1] - 1) + 0.5f),
315  std::floor(y0 * (_input->info()->tensor_shape()[2] - 1) + 0.5f));
316  _end = Coordinates(std::floor(x1 * (_input->info()->tensor_shape()[1] - 1) + 0.5f),
317  std::floor(y1 * (_input->info()->tensor_shape()[2] - 1) + 0.5f));
318  const TensorShape out_shape(_input->info()->tensor_shape()[0], abs(_end[0] - _start[0]) + 1, abs(_end[1] - _start[1]) + 1);
319  _output->info()->set_tensor_shape(out_shape);
320 
321  bool is_width_flipped = _end[0] < _start[0];
322  bool is_height_flipped = _end[1] < _start[1];
323  if(is_height_flipped)
324  {
325  _rows_out_of_bounds[0] = _start[1] >= static_cast<int32_t>(_input->info()->dimension(2)) ? std::min(static_cast<uint32_t>(_start[1] - _input->info()->dimension(2) + 1),
326  static_cast<uint32_t>(_output->info()->dimension(2))) :
327  0;
328  _rows_out_of_bounds[1] = _end[1] < 0 ? std::min(static_cast<uint32_t>(-_end[1]),
329  static_cast<uint32_t>(_output->info()->dimension(2))) :
330  0;
331  }
332  else
333  {
334  _rows_out_of_bounds[0] = _start[1] < 0 ? std::min(static_cast<uint32_t>(-_start[1]),
335  static_cast<uint32_t>(_output->info()->dimension(2))) :
336  0;
337  _rows_out_of_bounds[1] = _end[1] >= static_cast<int32_t>(_input->info()->dimension(2)) ? std::min(static_cast<uint32_t>(_end[1] - _input->info()->dimension(2) + 1),
338  static_cast<uint32_t>(_output->info()->dimension(2))) :
339  0;
340  }
341  if(is_width_flipped)
342  {
343  _cols_out_of_bounds[0] = _start[0] >= static_cast<int32_t>(_input->info()->dimension(1)) ? std::min(static_cast<uint32_t>(_start[0] - _input->info()->dimension(1) + 1),
344  static_cast<uint32_t>(_output->info()->dimension(1))) :
345  0;
346  _cols_out_of_bounds[1] = _end[0] < 0 ? std::min(static_cast<uint32_t>(-_end[0]),
347  static_cast<uint32_t>(_output->info()->dimension(1))) :
348  0;
349  }
350  else
351  {
352  _cols_out_of_bounds[0] = _start[0] < 0 ? std::min(static_cast<uint32_t>(-_start[0]),
353  static_cast<uint32_t>(_output->info()->dimension(1))) :
354  0;
355  _cols_out_of_bounds[1] = _end[0] >= static_cast<int32_t>(_input->info()->dimension(1)) ? std::min(static_cast<uint32_t>(_end[0] - _input->info()->dimension(1) + 1),
356  static_cast<uint32_t>(_output->info()->dimension(1))) :
357  0;
358  }
359 
360  INEKernel::configure(calculate_max_window(*_output->info()));
361 }
362 
364 {
365  ARM_COMPUTE_UNUSED(window, info);
368 
369  ARM_COMPUTE_ERROR_ON(_input->info()->has_padding());
370  ARM_COMPUTE_ERROR_ON(_output->info()->has_padding());
371 
372  uint32_t batch_index = *(reinterpret_cast<int32_t *>(_box_ind->ptr_to_element(Coordinates(_crop_box_ind))));
373  Coordinates input_offset(0, _end[0] < _start[0] ? _start[0] - _cols_out_of_bounds[0] : _start[0] + _cols_out_of_bounds[0],
374  _end[1] < _start[1] ? _start[1] - _rows_out_of_bounds[0] : _start[1] + _rows_out_of_bounds[0], batch_index);
375  execute_window(_input, _output, input_offset, _extrapolation_value, _rows_out_of_bounds, _cols_out_of_bounds, _in_bounds_crop_function, _end[1] < _start[1],
376  _cols_out_of_bounds[0] + _cols_out_of_bounds[1] < _output->info()->dimension(1), _cols_out_of_bounds[0] > 0, _cols_out_of_bounds[1] > 0,
377  _start[0] <= _end[0], _end[0] < _start[0]);
378 }
379 } // namespace arm_compute
virtual size_t num_dimensions() const =0
The number of dimensions of the tensor (rank)
Window calculate_max_window(const ValidRegion &valid_region, const Steps &steps, bool skip_border, BorderSize border_size)
const Window & window() const
The maximum window the kernel can be executed on.
Definition: IKernel.cpp:28
uint8_t * ptr_to_element(const Coordinates &id) const
Return a pointer to the element at the passed coordinates.
Definition: ITensor.h:63
Shape of a tensor.
Definition: TensorShape.h:39
#define ARM_COMPUTE_RETURN_ERROR_ON_DATA_LAYOUT_NOT_IN(t,...)
Definition: Validate.h:742
#define ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_LAYOUT(...)
Definition: Validate.h:490
#define ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(tensor)
Definition: Validate.h:115
virtual size_t dimension(size_t index) const =0
Return the size of the requested dimension.
#define ARM_COMPUTE_ERROR(msg)
Print the given message then throw an std::runtime_error.
Definition: Error.h:352
void(const ITensor *, const ITensor *, float *, Coordinates, int32_t, int32_t, int32_t, bool, bool) InBoundsCropFunction
Function to use for in bounds crop for the particular tensor types passed to configure() ...
Definition: NECropKernel.h:94
virtual ITensorInfo & set_tensor_shape(const TensorShape &shape)=0
Set the shape of an already initialized tensor.
1 channel, 1 U8 per channel
uint8x16_t vloadq(const uint8_t *ptr)
Definition: load.h:58
virtual DataType data_type() const =0
Data type used for each element of the tensor.
1 channel, 1 F32 per channel
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
Definition: Error.h:466
Store the tensor's metadata.
Definition: ITensorInfo.h:40
#define ARM_COMPUTE_ERROR_THROW_ON(status)
Definition: Error.h:455
1 channel, 1 U16 per channel
Status class.
Definition: Error.h:52
void configure(const ITensor *input, const ITensor *crop_boxes, const ITensor *box_ind, ITensor *output, uint32_t crop_box_ind=0, float extrapolation_value=0)
Configure kernel.
#define ARM_COMPUTE_RETURN_ERROR_ON(cond)
If the condition is true, an error is returned.
Definition: Error.h:296
Interface for CPU tensor.
Definition: ITensor.h:36
Copyright (c) 2017-2021 Arm Limited.
1 channel, 1 F16 per channel
1 channel, 1 S32 per channel
#define ARM_COMPUTE_UNUSED(...)
To avoid unused variables warnings.
Definition: Error.h:152
1 channel, 1 U32 per channel
virtual const TensorShape & tensor_shape() const =0
Size for each dimension of the tensor.
Coordinates of an item.
Definition: Coordinates.h:37
virtual ITensorInfo * info() const =0
Interface to be implemented by the child class to return the tensor's metadata.
uint8x8_t vgetlow(const uint8x16_t val)
Definition: getlow.h:39
void configure_output_shape()
Configure output tensor&#39;s shape as this can only be determined at runtime.
uint8x16_t vcombine(const uint8x8_t &a, const uint8x8_t &b)
Definition: combine.h:39
void run(const Window &window, const ThreadInfo &info) override
Execute the kernel on the passed window.
static Status validate(const ITensorInfo *input, const ITensorInfo *crop_boxes, const ITensorInfo *box_ind, const ITensorInfo *output, uint32_t crop_box_ind=0, float extrapolation_value=0)
Static function to check if the given info will lead to a valid configuration of NECropKernel.
#define ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(k)
Definition: Validate.h:915
1 channel, 1 S16 per channel
uint8x8_t vgethigh(const uint8x16_t val)
Definition: gethigh.h:39
ScaleKernelInfo info(interpolation_policy, default_border_mode, PixelValue(), sampling_policy, false)
Information about executing thread and CPU.
Definition: CPPTypes.h:158
virtual size_t total_size() const =0
Returns the total size of the tensor in bytes.
unsigned int num_dimensions() const
Returns the effective dimensionality of the tensor.
Definition: Dimensions.h:143
Num samples, height, width, channels.
#define ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(t, c,...)
Definition: Validate.h:788
uint8x8_t vrev64(const uint8x8_t &a)
Definition: rev64.h:39
uint8x8_t vload(const uint8_t *ptr)
Definition: load.h:39
void vstore(uint8_t *ptr, uint8x8_t val)
Definition: store.h:39
#define ARM_COMPUTE_ERROR_ON_NULLPTR(...)
Definition: Validate.h:157
uint8x8_t vdup_n(uint8_t value, traits::vector_64_tag)
Definition: dup_n.h:41
Includes all wrapper headers at once.
#define ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_NOT_IN(t,...)
Definition: Validate.h:690
Describe a multidimensional execution window.
Definition: Window.h:39
virtual bool has_padding() const =0
Checks if the tensor has been allocated with padding or not.
NECropKernel()
Default constructor.
#define ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(f, s)
Definition: Validate.h:201