Compute Library 21.02
ClPoolingKernel.cpp
1 /*
2  * Copyright (c) 2017-2021 Arm Limited.
3  *
4  * SPDX-License-Identifier: MIT
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a copy
7  * of this software and associated documentation files (the "Software"), to
8  * deal in the Software without restriction, including without limitation the
9  * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10  * sell copies of the Software, and to permit persons to whom the Software is
11  * furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in all
14  * copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22  * SOFTWARE.
23  */
25 
30 #include "arm_compute/core/Utils.h"
33 #include "src/core/CL/CLValidate.h"
36 #include "support/Cast.h"
37 #include "support/StringSupport.h"
38 
39 namespace arm_compute
40 {
41 namespace opencl
42 {
43 namespace kernels
44 {
46 
47 namespace
48 {
49 // Internal window config info
50 using ClPoolingConfig = std::pair<unsigned int, BorderSize>; //num_elems_processed_per_iteration, border_size
51 
52 void auto_init(const ITensorInfo *src, ITensorInfo *dst, ITensorInfo *indices, PoolingLayerInfo pool_info)
53 {
54  TensorShape out_shape = compute_pool_shape(*src, pool_info);
55  auto_init_if_empty(*dst, src->clone()->set_tensor_shape(out_shape));
56  if(indices)
57  {
58  auto_init_if_empty(*indices, src->clone()->set_tensor_shape(out_shape).set_data_type(DataType::U32));
59  }
60 }
61 
62 Status validate_arguments(const ITensorInfo *src, const ITensorInfo *dst, const PoolingLayerInfo &pool_info, const ITensorInfo *indices)
63 {
67  ARM_COMPUTE_RETURN_ERROR_ON_MSG((is_data_type_quantized_asymmetric(src->data_type()) && pool_info.pool_type == PoolingType::L2),
68  "Unsupported combination of parameters!");
69 
70  // Check indices
71  if(indices)
72  {
74  ARM_COMPUTE_RETURN_ERROR_ON_MSG(pool_info.pool_type != PoolingType::MAX, "Pooling indices only supported for MAX pooling method");
75  ARM_COMPUTE_RETURN_ERROR_ON_MSG((pool_info.pool_size != Size2D(2, 2)), "Pooling indices only supported for pool size 2x2");
76 
77  if(indices->total_size() != 0)
78  {
79  TensorInfo idx_info(TensorInfo(compute_pool_shape(*src, pool_info), 1, DataType::U32));
81  }
82  }
83 
84  // Checks performed when dst is configured
85  if(dst->total_size() != 0)
86  {
89  TensorInfo out_info(TensorInfo(compute_pool_shape(*src, pool_info), 1, dst->data_type()));
91  }
92 
93  return Status{};
94 }
95 
96 std::tuple<Status, Window, ClPoolingConfig> validate_and_configure_window(ITensorInfo *src, ITensorInfo *dst, const PoolingLayerInfo &pool_info, ITensorInfo *indices = nullptr)
97 {
99 
100  // Get data layout
101  const DataLayout data_layout = pool_info.data_layout == DataLayout::UNKNOWN ? src->data_layout() : pool_info.data_layout;
 102  const int idx_width = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
 103  const int idx_height = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
 104 
105  int pool_stride_x = 0;
106  int pool_stride_y = 0;
107  unsigned int pooled_w = 0;
108  unsigned int pooled_h = 0;
109  int pool_size_x = pool_info.is_global_pooling ? src->dimension(idx_width) : pool_info.pool_size.width;
110  int pool_size_y = pool_info.is_global_pooling ? src->dimension(idx_height) : pool_info.pool_size.height;
111  const PadStrideInfo pad_stride_info = pool_info.pad_stride_info;
112  std::tie(pool_stride_x, pool_stride_y) = pad_stride_info.stride();
113  const int pool_pad_right = pad_stride_info.pad_right();
114  const int pool_pad_top = pad_stride_info.pad_top();
115  const int pool_pad_left = pad_stride_info.pad_left();
116  const int pool_pad_bottom = pad_stride_info.pad_bottom();
117  BorderSize border_size = BorderSize();
118 
119  auto_init(src, dst, indices, pool_info);
120  pooled_w = dst->tensor_shape()[idx_width];
121  pooled_h = dst->tensor_shape()[idx_height];
122 
123  const DataType data_type = src->data_type();
124 
125  const int src_width = src->dimension(idx_width);
126  const int src_height = src->dimension(idx_height);
127 
128  unsigned int num_elems_processed_per_iteration = 0;
129  bool window_changed = false;
130  Window win{};
131  switch(data_layout)
132  {
133  case DataLayout::NCHW:
134  {
135  // Initialize border size
136  border_size = BorderSize(pool_pad_top, pool_pad_right, pool_pad_bottom, pool_pad_left);
137  // Change the number of elements processed per iteration
 138  // for 3x3 pooling with a stride less than or equal to 3
139  const bool can_optimize = (pool_size_x == 3) && (pool_size_y == 3) && (pool_stride_x <= 3) && !is_data_type_quantized(data_type);
140  num_elems_processed_per_iteration = can_optimize ? 4 : 1;
141  const unsigned int num_elems_read_per_iteration = (num_elems_processed_per_iteration - 1) * pool_stride_x + pool_size_x;
142 
143  // Number of iterations in X dimension
144  const int num_iterations_x = (pooled_w + num_elems_processed_per_iteration - 1) / num_elems_processed_per_iteration;
145 
146  // Upper limit for the number of right/bottom border elements that are accessed
147  const int upper_bound_w = ((num_iterations_x - 1) * num_elems_processed_per_iteration * pool_stride_x - pool_pad_left + num_elems_read_per_iteration) - src_width;
148  const int upper_bound_h = ((pooled_h - 1) * pool_stride_y - pool_pad_top + pool_size_y) - src_height;
149 
150  border_size.right = std::max(upper_bound_w, pool_pad_right);
151  border_size.bottom = std::max(upper_bound_h, pool_pad_bottom);
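      // Note on the bounds above: the last iteration along X starts reading at input column
      // (num_iterations_x - 1) * num_elems_processed_per_iteration * pool_stride_x - pool_pad_left and touches
      // num_elems_read_per_iteration columns (and each output row reads pool_size_y input rows), so
      // upper_bound_w / upper_bound_h measure how far the accesses can run past the source extent; the
      // right/bottom border is grown to cover that overrun, but never shrunk below the requested padding.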
152 
153  win = calculate_max_window(*dst, Steps(num_elems_processed_per_iteration));
154 
155  AccessWindowRectangle src_access(src, -pool_pad_left, -pool_pad_top, num_elems_read_per_iteration, pool_size_y,
156  pool_stride_x, pool_stride_y);
157  AccessWindowHorizontal dst_access(dst, 0, num_elems_processed_per_iteration);
158 
159  // Update indices window
160  if(indices)
161  {
162  AccessWindowHorizontal indices_access(indices, 0, num_elems_processed_per_iteration);
163  window_changed = update_window_and_padding(win, src_access, dst_access, indices_access);
164  indices_access.set_valid_region(win, ValidRegion(Coordinates(), indices->tensor_shape()));
165  }
166  else
167  {
168  window_changed = update_window_and_padding(win, src_access, dst_access);
169  }
170 
171  dst_access.set_valid_region(win, ValidRegion(Coordinates(), dst->tensor_shape()));
172  break;
173  }
174  case DataLayout::NHWC:
175  {
176  // Initialize border size
177  border_size = BorderSize();
178  num_elems_processed_per_iteration = adjust_vec_size(4, dst->dimension(0));
179  win = calculate_max_window(*dst, Steps(num_elems_processed_per_iteration));
180 
181  if(indices != nullptr)
182  {
183  indices->set_valid_region(ValidRegion(Coordinates(), indices->tensor_shape()));
184  }
185 
186  dst->set_valid_region(ValidRegion(Coordinates(), dst->tensor_shape()));
187  break;
188  }
189  default:
190  ARM_COMPUTE_ERROR("Not implemented");
191  }
192 
193  Status err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{};
194  return std::make_tuple(err, win, ClPoolingConfig(num_elems_processed_per_iteration, border_size));
195 }
196 } // namespace
197 
 198 ClPoolingKernel::ClPoolingKernel()
 199  : _pool_info(), _data_layout(DataLayout::UNKNOWN), _border_size(0), _num_elems_processed_per_iteration(1)
200 {
201 }
202 
 203 BorderSize ClPoolingKernel::border_size() const
 204 {
205  return _border_size;
206 }
207 
208 void ClPoolingKernel::configure(const ClCompileContext &compile_context, ITensorInfo *src, ITensorInfo *dst, const PoolingLayerInfo &pool_info, ITensorInfo *indices)
209 {
211 
212  auto padding_info = get_padding_info({ src, dst, indices });
213 
214  // Set instance variables
215  _pool_info = pool_info;
216  _data_layout = pool_info.data_layout == DataLayout::UNKNOWN ? src->data_layout() : pool_info.data_layout;
217  int pool_stride_x = 0;
218  int pool_stride_y = 0;
219  const PoolingType pool_type = pool_info.pool_type;
 220  const int idx_width = get_data_layout_dimension_index(_data_layout, DataLayoutDimension::WIDTH);
 221  const int idx_height = get_data_layout_dimension_index(_data_layout, DataLayoutDimension::HEIGHT);
 222  const int idx_channel = get_data_layout_dimension_index(_data_layout, DataLayoutDimension::CHANNEL);
 223  const int idx_batch_size = get_data_layout_dimension_index(_data_layout, DataLayoutDimension::BATCHES);
 224  const int pool_size_x = pool_info.is_global_pooling ? src->dimension(idx_width) : pool_info.pool_size.width;
225  const int pool_size_y = pool_info.is_global_pooling ? src->dimension(idx_height) : pool_info.pool_size.height;
226  const PadStrideInfo pad_stride_info = pool_info.pad_stride_info;
227  const bool exclude_padding = pool_info.exclude_padding;
228  std::tie(pool_stride_x, pool_stride_y) = pad_stride_info.stride();
229  const int pool_pad_top = pad_stride_info.pad_top();
230  const int pool_pad_left = pad_stride_info.pad_left();
231 
232  // Set build options
233  CLBuildOptions build_opts;
234  const DataType data_type = src->data_type();
235 
236  // Configure kernel window
237  auto win_config = validate_and_configure_window(src, dst, pool_info, indices);
238 
239  ARM_COMPUTE_ERROR_THROW_ON(std::get<0>(win_config));
240  ICLKernel::configure_internal(std::get<1>(win_config));
241 
242  ClPoolingConfig pooling_config = std::get<2>(win_config);
243  _num_elems_processed_per_iteration = pooling_config.first;
244  _border_size = pooling_config.second;
245 
246  build_opts.add_option("-DVEC_SIZE=" + support::cpp11::to_string(_num_elems_processed_per_iteration));
247 
 248  // The tensor padding is used to calculate the indices for MAX pooling
249  if(pool_info.pool_size == Size2D(2, 2) && pool_type == PoolingType::MAX && indices && is_data_type_float(data_type))
250  {
251  build_opts.add_option("-DPAD_TENSOR_LEFT=" + support::cpp11::to_string(src->padding().left));
252  build_opts.add_option("-DPAD_TENSOR_RIGHT=" + support::cpp11::to_string(src->padding().right));
253  build_opts.add_option("-DPAD_TENSOR_TOP=" + support::cpp11::to_string(src->padding().top));
254  build_opts.add_option("-DPAD_TENSOR_BOTTOM=" + support::cpp11::to_string(src->padding().bottom));
255  build_opts.add_option("-DTENSOR_CHANNEL=" + support::cpp11::to_string(src->dimension(idx_channel)));
256  build_opts.add_option("-DTENSOR_WIDTH=" + support::cpp11::to_string(src->dimension(idx_width)));
257  build_opts.add_option("-DTENSOR_HEIGHT=" + support::cpp11::to_string(src->dimension(idx_height)));
258  }
259 
261  {
262  const UniformQuantizationInfo iq_info = src->quantization_info().uniform();
263  const UniformQuantizationInfo oq_info = dst->quantization_info().uniform();
264 
265  build_opts.add_option("-DOFFSET_IN1=" + float_to_string_with_full_precision(iq_info.offset));
266  build_opts.add_option("-DOFFSET_OUT=" + float_to_string_with_full_precision(oq_info.offset));
267  build_opts.add_option("-DSCALE_IN1=" + float_to_string_with_full_precision(iq_info.scale));
268  build_opts.add_option("-DSCALE_OUT=" + float_to_string_with_full_precision(oq_info.scale));
269  }
270 
271  // Check dst dimensions
272  auto_init(src, dst, indices, pool_info);
273 
274  ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(src, dst, pool_info, indices));
275 
276  build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(data_type));
277  build_opts.add_option("-DPOOL_" + string_from_pooling_type(pool_type));
278  build_opts.add_option("-DSTRIDE_X=" + support::cpp11::to_string(pool_stride_x));
279  build_opts.add_option("-DSTRIDE_Y=" + support::cpp11::to_string(pool_stride_y));
280  build_opts.add_option("-DPAD_X=" + support::cpp11::to_string(pool_pad_left));
281  build_opts.add_option("-DPAD_Y=" + support::cpp11::to_string(pool_pad_top));
282  build_opts.add_option("-DPOOL_SIZE_X=" + support::cpp11::to_string(pool_size_x));
283  build_opts.add_option("-DPOOL_SIZE_Y=" + support::cpp11::to_string(pool_size_y));
284 
 285  // Set the initial value for the pooling operation according to the data type
286  if(pool_type == PoolingType::MAX)
287  {
288  if(is_data_type_quantized(data_type))
289  {
 290  PixelValue type_min{};
 291  std::tie(type_min, std::ignore) = get_min_max(data_type);
292  build_opts.add_option("-DINITIAL_VALUE=" + support::cpp11::to_string(type_min.get<int32_t>()));
293  }
294  else
295  {
297  }
298  }
299  else
300  {
301  // Pool AVG and Pool L2 initial value
302  build_opts.add_option("-DINITIAL_VALUE=0");
303  }
304 
305  build_opts.add_option("-DMAX_WIDTH=" + support::cpp11::to_string(src->dimension(idx_width) + (exclude_padding ? 0 : pool_pad_left)));
306  build_opts.add_option("-DMAX_HEIGHT=" + support::cpp11::to_string(src->dimension(idx_height) + (exclude_padding ? 0 : pool_pad_top)));
307 
308  // Create kernel
309  switch(_data_layout)
310  {
311  case DataLayout::NCHW:
312  {
313  const auto use_fp_mixed_precision = (data_type == DataType::F16) && pool_info.fp_mixed_precision;
314  const auto use_wider_accumulator = use_fp_mixed_precision && (pool_type != PoolingType::MAX);
315  const auto acc_data_type = get_cl_type_from_data_type(use_wider_accumulator ? DataType::F32 : data_type);
316  build_opts.add_option("-DACC_DATA_TYPE=" + acc_data_type);
317  build_opts.add_option_if(use_wider_accumulator, "-DFP_MIXED_PRECISION");
318 
319  if(pool_type != PoolingType::MAX)
320  {
321  build_opts.add_option_if(exclude_padding, "-DEXCLUDE_PADDING");
322  }
323 
324  if((pool_size_x == 3) && (pool_size_y == 3) && !is_data_type_quantized_asymmetric(data_type))
325  {
 326  // Check whether this is a 3x3 pooling with stride_x less than or equal to 3. In that case, run an optimized OpenCL kernel where
327  // each thread computes 4 dst elements
328  const bool is_pool3x3_stride_le3 = (pool_size_x == 3) && (pool_size_y == 3) && (pool_stride_x <= 3);
329 
330  std::string kernel_name = ((is_pool3x3_stride_le3) ? "pooling_layer_optimized_" : "pooling_layer_")
331  + support::cpp11::to_string(pool_size_x);
332  _kernel = create_kernel(compile_context, kernel_name, build_opts.options());
333  }
334  else if(pool_info.pool_size == Size2D(2, 2) && pool_type == PoolingType::MAX && indices && is_data_type_float(data_type))
335  {
 336  // For 2x2 max pooling, store the indices which will be used in max unpooling
337  if(data_type == DataType::F32)
338  {
339  std::string kernel_name = "pooling_layer_2_nchw_indices_fp32";
340  _kernel = create_kernel(compile_context, kernel_name, build_opts.options());
341  }
342  else if(data_type == DataType::F16)
343  {
344  std::string kernel_name = "pooling_layer_2_nchw_indices_fp16";
345  _kernel = create_kernel(compile_context, kernel_name, build_opts.options());
346  }
347  }
348  else // Run general case
349  {
350  std::string kernel_name = is_data_type_quantized_asymmetric(data_type) ? "pooling_layer_MxN_quantized_nchw" : "pooling_layer_MxN_nchw";
351  _kernel = create_kernel(compile_context, kernel_name, build_opts.options());
352  }
353  break;
354  }
355  case DataLayout::NHWC:
356  {
 357  // Floating point mixed precision is supported on F16 only
358  const auto use_fp_mixed_precision = (data_type == DataType::F16) && pool_info.fp_mixed_precision && pool_type != PoolingType::MAX;
359 
360  // Wider accumulation is required to avoid accuracy loss
361  // Case 1: Floating point mixed precision (fp16 src data and fp32 accumulation)
 362  // Case 2: Quantized (int8/uint8 src data and int32 accumulation)
363  DataType acc_data_type = data_type;
364 
365  if(use_fp_mixed_precision)
366  {
367  acc_data_type = DataType::F32;
368  }
369  else if(is_data_type_quantized(data_type) && pool_type != PoolingType::MAX)
370  {
371  acc_data_type = DataType::S32;
372  }
373 
374  build_opts.add_option("-DACC_DATA_TYPE=" + get_cl_type_from_data_type(acc_data_type));
375  build_opts.add_option_if(use_fp_mixed_precision, "-DFP_MIXED_PRECISION");
376  build_opts.add_option_if(exclude_padding, "-DEXCLUDE_PADDING");
377  build_opts.add_option("-DSRC_WIDTH=" + support::cpp11::to_string(src->dimension(idx_width)));
378  build_opts.add_option("-DSRC_HEIGHT=" + support::cpp11::to_string(src->dimension(idx_height)));
379  build_opts.add_option("-DDST_HEIGHT=" + support::cpp11::to_string(dst->dimension(idx_height)));
380  build_opts.add_option("-DDST_CHANNELS=" + support::cpp11::to_string(dst->dimension(idx_channel)));
381  build_opts.add_option("-DDST_BATCH_SIZE=" + support::cpp11::to_string(dst->dimension(idx_batch_size)));
382  build_opts.add_option("-DVEC_SIZE_LEFTOVER=" + support::cpp11::to_string(src->dimension(0) % _num_elems_processed_per_iteration));
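      // _num_elems_processed_per_iteration is the vector size picked by adjust_vec_size() in
      // validate_and_configure_window(); when the channel count (dimension 0 in NHWC) is not a multiple
      // of it, VEC_SIZE_LEFTOVER tells the OpenCL kernel how many elements the final, partial vector
      // holds so that the boundary loads and stores can be handled safely.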
383  if(pool_info.pool_size == Size2D(2, 2) && is_data_type_float(data_type))
384  {
385  build_opts.add_option_if(indices != nullptr && pool_type == PoolingType::MAX, "-DEXTRACT_MAX_INDEX");
386 
387  std::string kernel_name = "pooling_layer_2x2_nhwc";
388  _kernel = create_kernel(compile_context, kernel_name, build_opts.options());
389  }
390  else
391  {
392  std::string kernel_name = is_data_type_quantized_asymmetric(data_type) ? "pooling_layer_MxN_quantized_nhwc" : "pooling_layer_MxN_nhwc";
393  _kernel = create_kernel(compile_context, kernel_name, build_opts.options());
394  }
395  break;
396  }
397  default:
398  ARM_COMPUTE_ERROR("Not implemented");
399  }
400 
401  // Set config_id for enabling LWS tuning
402  _config_id = "pooling_layer_";
403  _config_id += lower_string(string_from_data_type(data_type));
404  _config_id += "_";
406  _config_id += "_";
407  _config_id += support::cpp11::to_string(dst->dimension(idx_width));
408  _config_id += "_";
409  _config_id += support::cpp11::to_string(dst->dimension(idx_height));
410  _config_id += "_";
411  _config_id += support::cpp11::to_string(dst->dimension(idx_channel));
412  _config_id += "_";
413  _config_id += lower_string(string_from_data_layout(src->data_layout()));
414 
416 }
417 
418 Status ClPoolingKernel::validate(const ITensorInfo *src, const ITensorInfo *dst, const PoolingLayerInfo &pool_info, const ITensorInfo *indices)
419 {
420  ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(src, dst, pool_info, indices));
421  ARM_COMPUTE_RETURN_ON_ERROR(std::get<0>(validate_and_configure_window(src->clone().get(), dst->clone().get(), pool_info)));
422 
423  return Status{};
424 }
425 
426 void ClPoolingKernel::run_op(ITensorPack &tensors, const Window &window, cl::CommandQueue &queue)
427 {
430 
431  unsigned int pool_stride_x = 0;
432  unsigned int pool_stride_y = 0;
433  std::tie(pool_stride_x, pool_stride_y) = _pool_info.pad_stride_info.stride();
434 
435  const auto src = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC));
436  auto dst = utils::cast::polymorphic_downcast<ICLTensor *>(tensors.get_tensor(TensorType::ACL_DST_0));
437  auto indices = utils::cast::polymorphic_downcast<ICLTensor *>(tensors.get_tensor(TensorType::ACL_DST_1));
438 
439  // Collapse window
440  Window window_collapsed = window.collapse_if_possible(ICLKernel::window(), Window::DimZ);
441 
442  switch(_data_layout)
443  {
444  case DataLayout::NCHW:
445  {
446  Window slice = window_collapsed.first_slice_window_3D();
447  do
448  {
 449  // Map the dst slice to the corresponding src window (scaled by the pooling stride)
 450  Window in_slice(slice);
 451  in_slice.set(Window::DimX, Window::Dimension(in_slice.x().start() - _pool_info.pad_stride_info.pad_left(),
 452  (in_slice.x().end() - _pool_info.pad_stride_info.pad_left()) * pool_stride_x,
 453  pool_stride_x * _num_elems_processed_per_iteration));
 454  in_slice.set(Window::DimY, Window::Dimension(in_slice.y().start() - _pool_info.pad_stride_info.pad_top(),
 455  (in_slice.y().end() - _pool_info.pad_stride_info.pad_top()) * pool_stride_y,
 456  pool_stride_y));
457 
458  // Set srcs
459  unsigned int idx = 0;
460  add_3D_tensor_argument(idx, src, in_slice);
461  add_3D_tensor_argument(idx, dst, slice);
462  if(indices && is_data_type_float(src->info()->data_type()) && (_pool_info.pool_size == Size2D(2, 2)))
463  {
464  add_3D_tensor_argument(idx, indices, slice);
465  }
466  enqueue(queue, *this, slice, lws_hint());
467  }
468  while(window_collapsed.slide_window_slice_3D(slice));
469  break;
470  }
471  case DataLayout::NHWC:
472  {
473  const size_t batch_size = dst->info()->tensor_shape().total_size_upper(3);
474 
475  Window slice = window_collapsed.first_slice_window_4D();
476  Window in_slice = window_collapsed.first_slice_window_4D();
477  in_slice.set(Window::DimX, Window::Dimension(0, src->info()->dimension(0), _num_elems_processed_per_iteration));
478  in_slice.set(Window::DimY, Window::Dimension(0, src->info()->dimension(1), pool_stride_x));
479  in_slice.set(Window::DimZ, Window::Dimension(0, src->info()->dimension(2), pool_stride_y));
480  in_slice.set(3, Window::Dimension(0, batch_size, 1));
481  do
482  {
483  // Set srcs
484  unsigned int idx = 0;
485  add_4D_tensor_argument(idx, src, in_slice);
486  add_4D_tensor_argument(idx, dst, slice);
487  if(indices && is_data_type_float(src->info()->data_type()) && (_pool_info.pool_type == PoolingType::MAX) && (_pool_info.pool_size == Size2D(2, 2)))
488  {
489  add_4D_tensor_argument(idx, indices, slice);
490  }
491  enqueue(queue, *this, slice, lws_hint());
492  }
493  while(window.slide_window_slice_4D(slice) && window.slide_window_slice_4D(in_slice));
494  break;
495  }
496  default:
497  ARM_COMPUTE_ERROR("Not implemented");
498  }
499 }
500 } // namespace kernels
501 } // namespace opencl
502 } // namespace arm_compute
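
For orientation, below is a minimal, hypothetical driver sketch showing how the validate / configure / run sequence of this kernel is typically exercised. It is not part of the file above: the header path, the PoolingLayerInfo constructor taking a DataLayout, CLKernelLibrary::get().get_compile_context() and CLScheduler::enqueue_op() are assumed to match the 21.02 runtime, and in practice the kernel is normally reached through the CLPoolingLayer operator rather than configured by hand.

// Hypothetical usage sketch (not part of ClPoolingKernel.cpp): 2x2 MAX pooling in NHWC.
// Assumes CLScheduler::get().default_init() has been called and that src has already been
// initialised, allocated and filled by the caller.
#include "arm_compute/core/CL/CLKernelLibrary.h"
#include "arm_compute/core/Error.h"
#include "arm_compute/core/ITensorPack.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "arm_compute/runtime/CL/CLTensor.h"
#include "src/core/gpu/cl/kernels/ClPoolingKernel.h"

using namespace arm_compute;

void run_max_pool_2x2(CLTensor &src, CLTensor &dst)
{
    // 2x2 MAX pooling with stride 2 and no padding, operating on NHWC data
    const PoolingLayerInfo pool_info(PoolingType::MAX, Size2D(2, 2), DataLayout::NHWC, PadStrideInfo(2, 2, 0, 0));

    // Check the configuration up front; dst may still carry an empty TensorInfo at this point
    ARM_COMPUTE_ERROR_THROW_ON(opencl::kernels::ClPoolingKernel::validate(src.info(), dst.info(), pool_info));

    // Configure the kernel; this auto-initialises dst's shape when it is empty
    opencl::kernels::ClPoolingKernel pool_kernel;
    pool_kernel.configure(CLKernelLibrary::get().get_compile_context(), src.info(), dst.info(), pool_info);

    // Allocate the output now that its shape is known, then pack the run-time tensors and enqueue
    dst.allocator()->allocate();
    ITensorPack pack;
    pack.add_tensor(TensorType::ACL_SRC, &src);
    pack.add_tensor(TensorType::ACL_DST_0, &dst);
    CLScheduler::get().enqueue_op(pool_kernel, pack, true);
}

Keeping validate() separate from configure() lets a caller reject unsupported parameter combinations (for example L2 pooling on quantized data, or indices requested for anything other than 2x2 MAX pooling) before any OpenCL kernel is built.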