Compute Library 21.02 - NEPadLayerKernel.cpp
/*
 * Copyright (c) 2019-2020 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "src/core/NEON/kernels/NEPadLayerKernel.h"

#include "arm_compute/core/Error.h"
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/ITensor.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/core/Validate.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "src/core/helpers/AutoConfiguration.h"
#include "src/core/helpers/WindowHelpers.h"

namespace arm_compute
{
namespace
{
Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, const PaddingList &paddings, const PaddingMode mode)
{
    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output);
    ARM_COMPUTE_RETURN_ERROR_ON(input->data_type() == DataType::UNKNOWN);
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(mode != PaddingMode::CONSTANT, "Only constant padding mode is supported");
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(paddings.size() > 4, "Padding list bigger than 4 dimensions");
    if(output->total_size() != 0)
    {
        const TensorShape expected_output_shape = arm_compute::misc::shape_calculator::compute_padded_shape(input->tensor_shape(), paddings);
        const TensorInfo  expected_output_info  = input->clone()->set_tensor_shape(expected_output_shape);
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(output, &expected_output_info);
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(output, &expected_output_info);
    }
    return Status{};
}
} // namespace

template <typename T>
void NEPadLayerKernel::run_pad_constant(const Window &window)
{
    Window output_window{ window };
    output_window.set(Window::DimX, Window::Dimension(0, 1, 1));

    const size_t element_size = _input->info()->element_size();
    Iterator     output_it(_output, output_window);
    execute_window_loop(output_window, [&](const Coordinates & id)
    {
        // Map the output coordinates (except X) back onto the input; if any of them lands in a
        // padded region, the whole output row is filled with the constant value.
        Coordinates idin{ id };
        for(size_t dim = _padding.size() - 1; dim > 0; --dim)
        {
            idin[dim] -= _padding[dim].first;
            if(idin[dim] < 0 || static_cast<int>(_input->info()->dimension(dim)) - 1 < idin[dim])
            {
                std::fill_n(reinterpret_cast<T *>(output_it.ptr()), _output->info()->dimension(0), _constant_value.get<T>());
                return;
            }
        }
        // Otherwise write the row as: left padding | copied input row | right padding
        T *input_it_ptr  = reinterpret_cast<T *>(_input->ptr_to_element(idin));
        T *output_it_ptr = reinterpret_cast<T *>(output_it.ptr());
        std::fill_n(output_it_ptr, _padding[0].first, _constant_value.get<T>());
        memcpy(output_it_ptr + _padding[0].first, input_it_ptr, _input->info()->dimension(0) * element_size);
        std::fill_n(output_it_ptr + _padding[0].first + _input->info()->dimension(0), _padding[0].second, _constant_value.get<T>());
    },
    output_it);
}

void NEPadLayerKernel::run_pad_constant_uint8_3Dinput_3Dpad(const Window &window)
{
    ARM_COMPUTE_UNUSED(window);

    const size_t start_plane = window.z().start();
    const size_t end_plane   = window.z().end();

    size_t start_plane_input = start_plane;
    if(_padding.size() > 2)
    {
        start_plane_input = (start_plane < _padding[2].first) ? 0 : start_plane - _padding[2].first;
    }
    const int output_plane_size = _output->info()->dimension(0) * _output->info()->dimension(1);
    const int input_plane_size  = _input->info()->dimension(0) * _input->info()->dimension(1);

    const int pad_y_elems_top = (_padding.size() > 1 ? _padding[1].first : 0) * _output->info()->dimension(0);
    const int pad_y_elems_bot = (_padding.size() > 1 ? _padding[1].second : 0) * _output->info()->dimension(0);

    const size_t jump_to_next_row_input  = _input->info()->dimension(0);
    const size_t jump_to_next_row_output = _padding[0].first + _padding[0].second;

    uint8_t       *output_row_ptr = _output->buffer() + _output->info()->offset_first_element_in_bytes() + start_plane * output_plane_size;
    const uint8_t *input_it_ptr   = _input->buffer() + _input->info()->offset_first_element_in_bytes() + start_plane_input * input_plane_size;
    const auto     pad_value      = _constant_value.get<uint8_t>();

    for(size_t z_i = start_plane; z_i < end_plane; ++z_i)
    {
        if(_padding.size() > 2 && z_i < _padding[2].first)
        {
            // Plane made entirely of front padding along Z
            memset(output_row_ptr, pad_value, output_plane_size);
            output_row_ptr += output_plane_size;
        }
        else if(_padding.size() > 2 && z_i > (_input->info()->dimension(2) + _padding[2].first - 1))
        {
            // Plane made entirely of back padding along Z
            memset(output_row_ptr, pad_value, output_plane_size);
            output_row_ptr += output_plane_size;
        }
        else
        {
            // Top padding rows of the plane
            memset(output_row_ptr, pad_value, pad_y_elems_top);
            output_row_ptr += pad_y_elems_top;
            size_t y_i = _input->info()->dimension(1);
            // Basic loop unrolling: process four input rows per iteration
            for(; y_i > 3; y_i -= 4)
            {
                memset(output_row_ptr, pad_value, _padding[0].first);
                output_row_ptr += _padding[0].first;

                memcpy(output_row_ptr, input_it_ptr, _input->info()->dimension(0));
                output_row_ptr += _input->info()->dimension(0);
                input_it_ptr += jump_to_next_row_input;

                memset(output_row_ptr, pad_value, _padding[0].second + _padding[0].first);
                output_row_ptr += jump_to_next_row_output;

                memcpy(output_row_ptr, input_it_ptr, _input->info()->dimension(0));
                output_row_ptr += _input->info()->dimension(0);
                input_it_ptr += jump_to_next_row_input;

                memset(output_row_ptr, pad_value, _padding[0].second + _padding[0].first);
                output_row_ptr += jump_to_next_row_output;

                memcpy(output_row_ptr, input_it_ptr, _input->info()->dimension(0));
                output_row_ptr += _input->info()->dimension(0);
                input_it_ptr += jump_to_next_row_input;

                memset(output_row_ptr, pad_value, _padding[0].second + _padding[0].first);
                output_row_ptr += jump_to_next_row_output;

                memcpy(output_row_ptr, input_it_ptr, _input->info()->dimension(0));
                output_row_ptr += _input->info()->dimension(0);
                input_it_ptr += jump_to_next_row_input;

                memset(output_row_ptr, pad_value, _padding[0].second);
                output_row_ptr += _padding[0].second;
            }
            // Leftover rows
            for(; y_i > 0; --y_i)
            {
                memset(output_row_ptr, pad_value, _padding[0].first);
                output_row_ptr += _padding[0].first;

                memcpy(output_row_ptr, input_it_ptr, _input->info()->dimension(0));
                output_row_ptr += _input->info()->dimension(0);
                input_it_ptr += _input->info()->dimension(0);

                memset(output_row_ptr, pad_value, _padding[0].second);
                output_row_ptr += _padding[0].second;
            }
            // Bottom padding rows of the plane
            memset(output_row_ptr, pad_value, pad_y_elems_bot);
            output_row_ptr += pad_y_elems_bot;
        }
    }
}

NEPadLayerKernel::NEPadLayerKernel()
    : _func(), _input(nullptr), _output(nullptr), _padding(), _constant_value(), _mode()
{
}

void NEPadLayerKernel::configure(ITensor *input, ITensor *output, const PaddingList &padding, const PixelValue constant_value, const PaddingMode mode)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
    // Auto-initialize the output tensor info to the padded shape of the input
    const TensorShape expected_output_shape = arm_compute::misc::shape_calculator::compute_padded_shape(input->info()->tensor_shape(), padding);
    const TensorInfo  expected_output_info  = input->info()->clone()->set_tensor_shape(expected_output_shape);
    auto_init_if_empty(*output->info(), expected_output_info);

    // Perform validation step
    ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info(), padding, mode));

    _input          = input;
    _output         = output;
    _padding        = padding;
    _constant_value = constant_value;
    _mode           = mode;

    if(_mode == PaddingMode::CONSTANT)
    {
        switch(_input->info()->element_size())
        {
            case 1:
                if(_input->info()->num_dimensions() == 3 &&                           // Input is 3D
                   padding.size() <= 3 &&                                             // Padding is at most 3D
                   !_input->info()->has_padding() && !_output->info()->has_padding()) // Input and output have no buffer padding
                {
                    _func = &NEPadLayerKernel::run_pad_constant_uint8_3Dinput_3Dpad;
                }
                else
                {
                    _func = &NEPadLayerKernel::run_pad_constant<uint8_t>;
                }
                break;
            case 2:
                _func = &NEPadLayerKernel::run_pad_constant<uint16_t>;
                break;
            case 4:
                _func = &NEPadLayerKernel::run_pad_constant<uint32_t>;
                break;
            default:
                ARM_COMPUTE_ERROR("Element size not supported");
                break;
        }
    }
    else
    {
        ARM_COMPUTE_ERROR("Padding mode not supported");
    }

    // Configure kernel window
    Window win = calculate_max_window(*output->info(), Steps());

    // This kernel does not require tensor border padding, so update_window_and_padding() can be skipped
    Coordinates coord;
    coord.set_num_dimensions(output->info()->num_dimensions());
    output->info()->set_valid_region(ValidRegion(coord, output->info()->tensor_shape()));

    ICPPKernel::configure(win);
}

Status NEPadLayerKernel::validate(const ITensorInfo *input, const ITensorInfo *output, const PaddingList &padding, const PixelValue constant_value, const PaddingMode mode)
{
    ARM_COMPUTE_UNUSED(constant_value);
    ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output, padding, mode));
    return Status{};
}

void NEPadLayerKernel::run(const Window &window, const ThreadInfo &info)
{
    ARM_COMPUTE_UNUSED(info);
    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICPPKernel::window(), window);

    if(_func != nullptr)
    {
        (this->*_func)(window);
    }
}
} // namespace arm_compute
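
The padded output shape used by validate_arguments() and configure() comes from misc::shape_calculator::compute_padded_shape(), which grows each dimension by the sum of its before/after padding amounts in the PaddingList. The following standalone sketch of that shape arithmetic is not part of this file; it only assumes the public TensorShape, Types and ShapeCalculator headers shown.

#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"

#include <iostream>

int main()
{
    using namespace arm_compute;

    // 4x3x2 input, padded by (1 before, 2 after) on X and (3 before, 0 after) on Y.
    const TensorShape input_shape(4U, 3U, 2U);
    const PaddingList padding = { { 1, 2 }, { 3, 0 } };

    const TensorShape padded_shape = misc::shape_calculator::compute_padded_shape(input_shape, padding);

    // Prints 7 x 6 x 2, i.e. (1 + 4 + 2) x (3 + 3 + 0) x 2.
    std::cout << padded_shape[0] << " x " << padded_shape[1] << " x " << padded_shape[2] << std::endl;
    return 0;
}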
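
For every output row, run_pad_constant() writes the left padding with std::fill_n, copies the input row with memcpy, and writes the right padding with another std::fill_n; run_pad_constant_uint8_3Dinput_3Dpad() applies the same idea plane by plane using memset/memcpy. The per-row pattern can be illustrated with plain standard C++ (illustrative only, not library code):

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <cstring>
#include <vector>

int main()
{
    const std::vector<uint8_t> input_row = { 1, 2, 3, 4 };
    const size_t  pad_left  = 2;
    const size_t  pad_right = 3;
    const uint8_t pad_value = 0;

    std::vector<uint8_t> output_row(pad_left + input_row.size() + pad_right);
    uint8_t *out = output_row.data();

    std::fill_n(out, pad_left, pad_value);                                // left padding
    std::memcpy(out + pad_left, input_row.data(), input_row.size());      // copied input row
    std::fill_n(out + pad_left + input_row.size(), pad_right, pad_value); // right padding

    // Prints: 0 0 1 2 3 4 0 0 0
    for(uint8_t v : output_row)
    {
        std::printf("%u ", static_cast<unsigned int>(v));
    }
    std::printf("\n");
    return 0;
}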
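
As of release 21.02 this kernel lives under src/ and is normally reached through the public runtime function NEPadLayer, which uses NEPadLayerKernel for PaddingMode::CONSTANT. Below is a minimal usage sketch, assuming the standard runtime headers and relying on the default constant value (zero) and the default CONSTANT mode.

#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/NEON/functions/NEPadLayer.h"
#include "arm_compute/runtime/Tensor.h"

int main()
{
    using namespace arm_compute;

    // 8x8 single-plane U8 source; the destination shape is inferred during configure().
    Tensor src;
    Tensor dst;
    src.allocator()->init(TensorInfo(TensorShape(8U, 8U, 1U), 1, DataType::U8));

    // Pad one element on each side of X and Y.
    const PaddingList padding = { { 1, 1 }, { 1, 1 } };

    // NEPadLayer::validate(src.info(), dst.info(), padding) can be called first to
    // check the configuration without modifying the tensors.
    NEPadLayer pad;
    pad.configure(&src, &dst, padding);

    src.allocator()->allocate();
    dst.allocator()->allocate();

    // ... fill src with data ...
    pad.run();
    return 0;
}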