Compute Library
 21.11
ClDirectConv2dKernel.cpp
/*
 * Copyright (c) 2017-2021 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "src/core/gpu/cl/kernels/ClDirectConv2dKernel.h"

#include "arm_compute/core/CL/CLHelpers.h"
#include "arm_compute/core/CL/CLKernelLibrary.h"
#include "arm_compute/core/CL/ICLTensor.h"
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/PixelValue.h"
#include "arm_compute/core/Utils.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "arm_compute/core/utils/quantization/AsymmHelpers.h"
#include "src/core/AccessWindowStatic.h"
#include "src/core/CL/CLUtils.h"
#include "src/core/CL/CLValidate.h"
#include "src/core/gpu/cl/kernels/gemm/ClGemmHelpers.h"
#include "src/core/helpers/AutoConfiguration.h"
#include "src/core/helpers/WindowHelpers.h"
#include "support/Cast.h"
#include "support/StringSupport.h"
namespace arm_compute
{
namespace opencl
{
namespace kernels
{
namespace
{
Status validate_arguments(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *dst,
                          const PadStrideInfo &conv_info, const ActivationLayerInfo &act_info)
{
    ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(src);
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src, 1, DataType::QASYMM8_SIGNED, DataType::QASYMM8, DataType::F16, DataType::F32);
    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src, weights);

    const DataLayout data_layout = src->data_layout();
    const int        width_idx   = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
    const int        height_idx  = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
    const int        channel_idx = get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL);

    ARM_COMPUTE_RETURN_ERROR_ON_MSG(weights->dimension(channel_idx) != src->dimension(channel_idx), "Weights feature map dimension should match the respective src's one");
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(weights->num_dimensions() > 4, "Weights can be at most 4 dimensional");

    if(data_layout == DataLayout::NCHW)
    {
        ARM_COMPUTE_RETURN_ERROR_ON_MSG(weights->dimension(width_idx) != weights->dimension(height_idx), "Weights should have same width and height");
        ARM_COMPUTE_RETURN_ERROR_ON_MSG((weights->dimension(width_idx) == 1) && std::get<0>(conv_info.stride()) > 3, "Strides larger than 3 not supported for 1x1 convolution.");
        ARM_COMPUTE_RETURN_ERROR_ON_MSG((weights->dimension(width_idx) == 3 || weights->dimension(width_idx) == 5 || weights->dimension(width_idx) == 9) && std::get<0>(conv_info.stride()) > 2,
                                        "Strides larger than 2 not supported for 3x3, 5x5, 9x9 convolution.");
        ARM_COMPUTE_RETURN_ERROR_ON_MSG(!is_data_type_float(src->data_type()) && act_info.enabled(), "Activation supported only for floating point and NHWC.");

        if(is_data_type_quantized(src->data_type()))
        {
            ARM_COMPUTE_RETURN_ERROR_ON_MSG(weights->dimension(width_idx) != 1 && weights->dimension(width_idx) != 3 && weights->dimension(width_idx) != 5 && weights->dimension(width_idx) != 9,
                                            "Kernel sizes other than 1x1, 3x3, 5x5 or 9x9 are not supported with quantized data types");
        }
        else
        {
            ARM_COMPUTE_RETURN_ERROR_ON_MSG(weights->dimension(width_idx) != 1 && weights->dimension(width_idx) != 3 && weights->dimension(width_idx) != 5,
                                            "Kernel sizes other than 1x1, 3x3 or 5x5 are not supported with float data types");
        }
    }

    if(biases != nullptr)
    {
        if(is_data_type_quantized_asymmetric(src->data_type()))
        {
            ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(biases, 1, DataType::S32);
        }
        else
        {
            ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(weights, biases);
        }
        ARM_COMPUTE_RETURN_ERROR_ON_MSG(biases->dimension(0) != weights->dimension(3),
                                        "Biases size and number of dst feature maps should match");
        ARM_COMPUTE_RETURN_ERROR_ON_MSG(biases->num_dimensions() > 1,
                                        "Biases should be one dimensional");
    }

    // Checks performed when dst is configured
    if(dst->total_size() != 0)
    {
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(dst->tensor_shape(),
                                                           misc::shape_calculator::compute_deep_convolution_shape(*src, *weights, conv_info));
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src, dst);
    }

    const auto data_type = src->data_type();
    if(is_data_type_quantized(data_type))
    {
        const UniformQuantizationInfo iqinfo = src->quantization_info().uniform();
        const UniformQuantizationInfo wqinfo = weights->quantization_info().uniform();
        const UniformQuantizationInfo oqinfo = dst->quantization_info().uniform();

        float multiplier        = iqinfo.scale * wqinfo.scale / oqinfo.scale;
        int   output_multiplier = 0;
        int   output_shift      = 0;
        ARM_COMPUTE_RETURN_ON_ERROR(quantization::calculate_quantized_multiplier(multiplier, &output_multiplier, &output_shift));
    }
    return Status{};
}

inline bool can_run_optimized_kernel_for_bifrost_nchw(GPUTarget gpu_target, unsigned int conv_stride_x, unsigned int conv_stride_y, unsigned int kernel_size,
                                                      DataType data_type, DataLayout data_layout)
{
    return gpu_target_is_in(gpu_target,
                            GPUTarget::G71, GPUTarget::G72, GPUTarget::G76,
                            GPUTarget::G51, GPUTarget::G51BIG, GPUTarget::G51LIT,
                            GPUTarget::G52, GPUTarget::G52LIT)
           && (kernel_size <= 5)
           && (conv_stride_x == 1) && (conv_stride_y == 1)
           && (data_type == DataType::F32)
           && (data_layout == DataLayout::NCHW);
}

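// For the NCHW path, pick how many elements each work-item reads from the input and writes to the
// output, depending on kernel size, stride and element size. The Bifrost-optimized variants below
// use fixed tile sizes; the generic path always writes 8x1 output elements per iteration.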
inline void setup_num_elems_nchw(unsigned int &num_elems_read_per_iteration_x, unsigned int &num_elems_read_per_iteration_y,
                                 unsigned int &num_elems_written_per_iteration_x, unsigned int &num_elems_written_per_iteration_y,
                                 unsigned int kernel_size, const PadStrideInfo &conv_info, const GPUTarget target, ITensorInfo *src)
{
    const DataType   data_type     = src->data_type();
    const DataLayout data_layout   = src->data_layout();
    unsigned int     conv_stride_x = std::get<0>(conv_info.stride());
    unsigned int     conv_stride_y = std::get<1>(conv_info.stride());

    const bool run_optimized_bifrost = can_run_optimized_kernel_for_bifrost_nchw(target, conv_stride_x, conv_stride_y, kernel_size, data_type, data_layout);

    if(run_optimized_bifrost)
    {
        // Configure kernel window
        switch(kernel_size)
        {
            case 1:
            {
                num_elems_read_per_iteration_x    = 4;
                num_elems_read_per_iteration_y    = 4;
                num_elems_written_per_iteration_x = 4;
                num_elems_written_per_iteration_y = 4;
                break;
            }
            case 3:
            {
                num_elems_read_per_iteration_x    = 6;
                num_elems_read_per_iteration_y    = 5;
                num_elems_written_per_iteration_x = 4;
                num_elems_written_per_iteration_y = 3;
                break;
            }
            case 5:
            {
                num_elems_read_per_iteration_x    = 8;
                num_elems_read_per_iteration_y    = 6;
                num_elems_written_per_iteration_x = 4;
                num_elems_written_per_iteration_y = 2;
                break;
            }
            default:
            {
                ARM_COMPUTE_ERROR("Kernel size not optimized for Bifrost");
            }
        }
    }
    else
    {
        num_elems_read_per_iteration_y    = kernel_size;
        num_elems_written_per_iteration_x = 8;
        num_elems_written_per_iteration_y = 1;
        switch(kernel_size)
        {
            case 1:
                switch(conv_stride_x)
                {
                    case 1:
                        num_elems_read_per_iteration_x = 8;
                        break;
                    case 2:
                        num_elems_read_per_iteration_x = 16;
                        break;
                    case 3:
                        switch(src->element_size())
                        {
                            case 1:
                                num_elems_read_per_iteration_x = 28;
                                break;
                            case 2:
                                num_elems_read_per_iteration_x = 24;
                                break;
                            case 4:
                                num_elems_read_per_iteration_x = 22;
                                break;
                            default:
                                ARM_COMPUTE_ERROR("Invalid data size");
                        }
                        break;
                    default:
                        ARM_COMPUTE_ERROR("Invalid convolution stride X");
                }
                break;
            case 3:
                switch(conv_stride_x)
                {
                    case 1:
                        num_elems_read_per_iteration_x = 10;
                        break;
                    case 2:
                        num_elems_read_per_iteration_x = 17;
                        break;
                    default:
                        ARM_COMPUTE_ERROR("Invalid convolution stride X");
                }
                break;
            case 5:
                switch(conv_stride_x)
                {
                    case 1:
                        num_elems_read_per_iteration_x = 12;
                        break;
                    case 2:
                        num_elems_read_per_iteration_x = 20;
                        break;
                    default:
                        ARM_COMPUTE_ERROR("Invalid convolution stride X");
                }
                break;
            case 9:
                switch(conv_stride_x)
                {
                    case 1:
                        num_elems_read_per_iteration_x = 16;
                        break;
                    case 2:
                        num_elems_read_per_iteration_x = 24;
                        break;
                    default:
                        ARM_COMPUTE_ERROR("Invalid convolution stride X");
                }
                break;
            default:
                ARM_COMPUTE_ERROR("Invalid direct convolution size");
        }
    }
}

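// Auto-initializes the destination tensor info (if still empty) from the computed deep convolution
// output shape, then builds the execution window: NHWC steps over up to 4 channels and 1, 2 or 4
// rows per iteration with no extra padding, while NCHW derives its steps from the per-iteration
// element counts above and updates the src/weights/dst padding accordingly.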
std::pair<Status, Window> validate_and_configure_window(ITensorInfo *src, ITensorInfo *weights, ITensorInfo *dst, const PadStrideInfo &conv_info, const GPUTarget target)
{
    const DataLayout data_layout = src->data_layout();

    // Get dst shape
    TensorShape output_shape = misc::shape_calculator::compute_deep_convolution_shape(*src, *weights, conv_info);

    // Output auto initialization if not yet initialized
    auto_init_if_empty(*dst, output_shape,
                       1,
                       src->data_type(),
                       src->quantization_info());

    if(data_layout == DataLayout::NHWC)
    {
        const unsigned int vec_size = std::min(static_cast<unsigned int>(dst->tensor_shape()[0]), 4u);
        unsigned int       num_rows = 1U;
        if(dst->tensor_shape()[0] > 16)
        {
            num_rows = src->data_type() == DataType::F32 ? 2U : 4U;
        }

        // Create window and update padding
        Window win = calculate_max_window(output_shape, Steps(vec_size, num_rows));
        return std::make_pair(Status{}, win);
    }
    else if(data_layout == DataLayout::NCHW)
    {
        const int          width_idx   = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
        const unsigned int kernel_size = weights->dimension(width_idx);

        unsigned int num_elems_read_per_iteration_x    = 0;
        unsigned int num_elems_read_per_iteration_y    = 0;
        unsigned int num_elems_written_per_iteration_x = 0;
        unsigned int num_elems_written_per_iteration_y = 0;

        unsigned int conv_pad_left = conv_info.pad_left();
        unsigned int conv_pad_top  = conv_info.pad_top();
        unsigned int conv_stride_x = std::get<0>(conv_info.stride());
        unsigned int conv_stride_y = std::get<1>(conv_info.stride());

        setup_num_elems_nchw(num_elems_read_per_iteration_x, num_elems_read_per_iteration_y,
                             num_elems_written_per_iteration_x, num_elems_written_per_iteration_y,
                             kernel_size, conv_info, target, src);

        // Create window and update padding
        bool   window_changed = false;
        Window win            = calculate_max_window(*dst, Steps(num_elems_written_per_iteration_x, num_elems_written_per_iteration_y));

        AccessWindowRectangle input_access(src, -conv_pad_left, -conv_pad_top, num_elems_read_per_iteration_x, num_elems_read_per_iteration_y, conv_stride_x, conv_stride_y);
        AccessWindowStatic    weights_access(weights, 0, 0, kernel_size, kernel_size);
        AccessWindowRectangle output_access(dst, 0, 0, num_elems_written_per_iteration_x, num_elems_written_per_iteration_y);
        window_changed = update_window_and_padding(win, input_access, weights_access, output_access);
        output_access.set_valid_region(win, ValidRegion(Coordinates(), dst->tensor_shape()));
        Status err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{};
        return std::make_pair(err, win);
    }
    else
    {
        ARM_COMPUTE_ERROR("Not supported");
    }
}

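// The weights can be exported to an OpenCL image2d only for NHWC floating-point tensors whose
// channel count is a multiple of 4, on devices other than Midgard/G71 that support the
// cl_khr_image2d_from_buffer path and whose maximum image dimensions can hold the reshaped weights.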
bool export_to_cl_image_support(ITensorInfo *tensor, GPUTarget gpu_target, DataLayout data_layout)
{
    if(tensor->tensor_shape()[0] % 4 || (data_layout != DataLayout::NHWC))
    {
        return false;
    }

    // If not floating point
    if(!is_data_type_float(tensor->data_type()))
    {
        return false;
    }

    if(gpu_target == GPUTarget::G71 || get_arch_from_target(gpu_target) == GPUTarget::MIDGARD)
    {
        return false;
    }

    // Check if the cl_khr_image2d_from_buffer extension is supported on the target platform
    if(!image2d_from_buffer_supported(CLKernelLibrary::get().get_device()))
    {
        return false;
    }

    // Check cl image pitch alignment
    if(get_cl_image_pitch_alignment(CLKernelLibrary::get().get_device()) == 0)
    {
        return false;
    }

    const size_t image_w     = tensor->tensor_shape()[0] / 4;
    const size_t image_h     = tensor->tensor_shape()[1] * tensor->tensor_shape()[2] * tensor->tensor_shape()[3];
    const size_t max_image_w = CLKernelLibrary::get().get_device().getInfo<CL_DEVICE_IMAGE2D_MAX_WIDTH>();
    const size_t max_image_h = CLKernelLibrary::get().get_device().getInfo<CL_DEVICE_IMAGE2D_MAX_HEIGHT>();

    if(image_w > max_image_w || image_h > max_image_h)
    {
        return false;
    }

    return true;
}

} // namespace

BorderSize ClDirectConv2dKernel::border_size() const
{
    return _border_size;
}

ClDirectConv2dKernel::ClDirectConv2dKernel()
{
    _type = CLKernelType::DIRECT;
}

void ClDirectConv2dKernel::configure(const CLCompileContext &compile_context, ITensorInfo *src, ITensorInfo *weights, ITensorInfo *biases, ITensorInfo *dst,
                                     const PadStrideInfo &conv_info, const ActivationLayerInfo &act_info)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(src, weights, dst);

    // Perform validation
    ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(src, weights, biases, dst, conv_info, act_info));

    const int conv_stride_x = std::get<0>(conv_info.stride());
    const int conv_stride_y = std::get<1>(conv_info.stride());

    _data_layout = src->data_layout();
    _conv_info   = conv_info;

    const unsigned int width_idx   = get_data_layout_dimension_index(_data_layout, DataLayoutDimension::WIDTH);
    const unsigned int height_idx  = get_data_layout_dimension_index(_data_layout, DataLayoutDimension::HEIGHT);
    const unsigned int channel_idx = get_data_layout_dimension_index(_data_layout, DataLayoutDimension::CHANNEL);
    const unsigned int kernel_size = weights->dimension(width_idx);
    const DataType     data_type   = src->data_type();

    const GPUTarget gpu_target = get_target();

    // Configure kernel window
    auto win_config = validate_and_configure_window(src, weights, dst, conv_info, gpu_target);
    ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
    ICLKernel::configure_internal(win_config.second);

    std::stringstream kernel_name;
    CLBuildOptions    build_options;

    if(_data_layout == DataLayout::NHWC)
    {
        _border_size = BorderSize();

        kernel_name << "direct_convolution_nhwc";

        const unsigned int n0                  = win_config.second.x().step();
        const unsigned int m0                  = win_config.second.y().step();
        const unsigned int k0                  = adjust_vec_size(is_data_type_quantized(data_type) ? 16u : 8u, src->dimension(channel_idx));
        const unsigned int partial_store_n0    = dst->dimension(channel_idx) % n0;
        const unsigned int pad_left            = conv_info.pad_left();
        const unsigned int pad_top             = conv_info.pad_top();
        const bool         export_to_cl_image  = export_to_cl_image_support(weights, gpu_target, _data_layout);

        // Update the padding for the weights tensor if we can export to cl_image
        if(export_to_cl_image)
        {
            gemm::update_padding_for_cl_image(weights);
        }

        if(biases != nullptr)
        {
            build_options.add_option(std::string("-DHAS_BIAS"));
            build_options.add_option(std::string("-DBIA_DATA_TYPE=" + get_cl_type_from_data_type(biases->data_type())));
        }

        build_options.add_option("-cl-fast-relaxed-math");
        build_options.add_option("-DSRC_TENSOR_TYPE=BUFFER");
        build_options.add_option("-DSRC_WIDTH=" + support::cpp11::to_string(src->dimension(width_idx)));
        build_options.add_option("-DSRC_HEIGHT=" + support::cpp11::to_string(src->dimension(height_idx)));
        build_options.add_option("-DSRC_CHANNELS=" + support::cpp11::to_string(src->dimension(channel_idx)));
        build_options.add_option("-DSRC_DATA_TYPE=" + get_cl_type_from_data_type(src->data_type()));
        build_options.add_option("-DDST_TENSOR_TYPE=BUFFER");
        build_options.add_option("-DDST_WIDTH=" + support::cpp11::to_string(dst->dimension(width_idx)));
        build_options.add_option("-DDST_HEIGHT=" + support::cpp11::to_string(dst->dimension(height_idx)));
        build_options.add_option("-DDST_CHANNELS=" + support::cpp11::to_string(dst->dimension(channel_idx)));
        build_options.add_option("-DDST_DATA_TYPE=" + get_cl_type_from_data_type(dst->data_type()));
        build_options.add_option_if_else(export_to_cl_image, "-DWEI_TENSOR_TYPE=IMAGE", "-DWEI_TENSOR_TYPE=BUFFER");
        build_options.add_option("-DWEI_WIDTH=" + support::cpp11::to_string(weights->dimension(width_idx)));
        build_options.add_option("-DWEI_HEIGHT=" + support::cpp11::to_string(weights->dimension(height_idx)));
        build_options.add_option("-DWEI_DATA_TYPE=" + get_cl_type_from_data_type(weights->data_type()));
        build_options.add_option("-DSTRIDE_X=" + support::cpp11::to_string(conv_stride_x));
        build_options.add_option("-DSTRIDE_Y=" + support::cpp11::to_string(conv_stride_y));
        build_options.add_option("-DPAD_LEFT=" + support::cpp11::to_string(pad_left));
        build_options.add_option("-DPAD_TOP=" + support::cpp11::to_string(pad_top));
        build_options.add_option("-DN0=" + support::cpp11::to_string(n0));
        build_options.add_option("-DM0=" + support::cpp11::to_string(m0));
        build_options.add_option("-DK0=" + support::cpp11::to_string(k0));
        build_options.add_option("-DPARTIAL_N0=" + support::cpp11::to_string(partial_store_n0));
        build_options.add_option("-DACTIVATION_TYPE=" + lower_string(string_from_activation_func(act_info.activation())));

        if(is_data_type_quantized(data_type))
        {
            const UniformQuantizationInfo iqinfo = src->quantization_info().uniform();
            const UniformQuantizationInfo wqinfo = weights->quantization_info().uniform();
            const UniformQuantizationInfo oqinfo = dst->quantization_info().uniform();

            PixelValue zero_value = PixelValue(0, src->data_type(), src->quantization_info());
            int        zero_value_s32;
            zero_value.get(zero_value_s32);

            float multiplier        = iqinfo.scale * wqinfo.scale / oqinfo.scale;
            int   output_multiplier = 0;
            int   output_shift      = 0;
            quantization::calculate_quantized_multiplier(multiplier, &output_multiplier, &output_shift);
            build_options.add_option("-DIS_QUANTIZED");
            build_options.add_option("-DDST_MULTIPLIER=" + support::cpp11::to_string(output_multiplier));
            build_options.add_option("-DDST_SHIFT=" + support::cpp11::to_string(output_shift));
            build_options.add_option("-DSRC_OFFSET=" + support::cpp11::to_string(-iqinfo.offset));
            build_options.add_option("-DWEI_OFFSET=" + support::cpp11::to_string(-wqinfo.offset));
            build_options.add_option("-DDST_OFFSET=" + support::cpp11::to_string(oqinfo.offset));
            build_options.add_option("-DZERO_VALUE=" + support::cpp11::to_string(zero_value_s32));
            build_options.add_option("-DACC_DATA_TYPE=" + get_cl_type_from_data_type(DataType::S32));
        }
        else
        {
            build_options.add_option("-DACC_DATA_TYPE=" + get_cl_type_from_data_type(data_type));
            build_options.add_option("-DZERO_VALUE=" + support::cpp11::to_string(0));
            build_options.add_option("-DSRC_OFFSET=" + support::cpp11::to_string(0));
            build_options.add_option("-DWEI_OFFSET=" + support::cpp11::to_string(0));
            build_options.add_option("-DDST_OFFSET=" + support::cpp11::to_string(0));
            build_options.add_option_if(act_info.enabled(), "-DA_VAL=" + float_to_string_with_full_precision(act_info.a()));
            build_options.add_option_if(act_info.enabled(), "-DB_VAL=" + float_to_string_with_full_precision(act_info.b()));
        }
    }
    else
    {
        _border_size = BorderSize(src->padding());

        kernel_name << "direct_convolution" << kernel_size << "x" << kernel_size;

        build_options.add_option_if(biases != nullptr, std::string("-DHAS_BIAS"));

        const bool run_optimized_for_bifrost = can_run_optimized_kernel_for_bifrost_nchw(gpu_target, conv_stride_x, conv_stride_y, kernel_size, data_type, _data_layout);

        if(run_optimized_for_bifrost)
        {
            build_options.add_option(std::string("-DWEIGHTS_DEPTH=" + support::cpp11::to_string(weights->dimension(channel_idx))));

            kernel_name << "_f32_bifrost";
        }
        else
        {
            build_options.add_option(std::string("-DDATA_TYPE=" + get_cl_type_from_data_type(data_type)));
            build_options.add_option(std::string("-DDATA_SIZE=" + get_data_size_from_data_type(data_type)));
            build_options.add_option(std::string("-DWEIGHTS_DEPTH=" + support::cpp11::to_string(weights->dimension(channel_idx))));
            build_options.add_option(std::string("-DSTRIDE_X=" + support::cpp11::to_string(conv_stride_x)));
            build_options.add_option(std::string("-DDATA_TYPE_PROMOTED=" + get_cl_type_from_data_type(data_type)));

            if(is_data_type_quantized(data_type))
            {
                const UniformQuantizationInfo iqinfo = src->quantization_info().uniform();
                const UniformQuantizationInfo wqinfo = weights->quantization_info().uniform();
                const UniformQuantizationInfo oqinfo = dst->quantization_info().uniform();

                float multiplier        = iqinfo.scale * wqinfo.scale / oqinfo.scale;
                int   output_multiplier = 0;
                int   output_shift      = 0;
                quantization::calculate_quantized_multiplier(multiplier, &output_multiplier, &output_shift);
                build_options.add_option("-DOUTPUT_MULTIPLIER=" + support::cpp11::to_string(output_multiplier));
                build_options.add_option("-DOUTPUT_SHIFT=" + support::cpp11::to_string(output_shift));
                build_options.add_option("-DKERNEL_SIZE=" + support::cpp11::to_string(kernel_size));
                build_options.add_option("-DINPUT_OFFSET=" + support::cpp11::to_string(-iqinfo.offset));
                build_options.add_option("-DWEIGHTS_OFFSET=" + support::cpp11::to_string(-wqinfo.offset));
                build_options.add_option("-DOUTPUT_OFFSET=" + support::cpp11::to_string(oqinfo.offset));

                kernel_name.str("direct_convolution_quantized");
            }
        }
    }

    _kernel = create_kernel(compile_context, kernel_name.str(), build_options.options());

    // Set config_id for enabling LWS tuning
    _config_id = kernel_name.str();
    _config_id += "_";
    _config_id += lower_string(string_from_data_type(data_type));
    _config_id += "_";
    _config_id += support::cpp11::to_string(kernel_size);
    _config_id += "_";
    _config_id += support::cpp11::to_string(border_size().left);
    _config_id += "_";
    _config_id += support::cpp11::to_string(border_size().top);
    _config_id += "_";
    _config_id += support::cpp11::to_string(border_size().right);
    _config_id += "_";
    _config_id += support::cpp11::to_string(border_size().bottom);
    _config_id += "_";
    _config_id += support::cpp11::to_string(conv_stride_x);
    _config_id += "_";
    _config_id += support::cpp11::to_string(conv_stride_y);
    _config_id += "_";
    _config_id += support::cpp11::to_string(dst->dimension(width_idx));
    _config_id += "_";
    _config_id += support::cpp11::to_string(dst->dimension(height_idx));
    _config_id += "_";
    _config_id += lower_string(string_from_data_layout(_data_layout));
}

Status ClDirectConv2dKernel::validate(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *dst,
                                      const PadStrideInfo &conv_info, const ActivationLayerInfo &act_info, const GPUTarget target)
{
    ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(src, weights, biases, dst, conv_info, act_info));
    ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(src->clone().get(), weights->clone().get(), dst->clone().get(), conv_info, target).first);

    return Status{};
}

void ClDirectConv2dKernel::run_op(ITensorPack &tensors, const Window &window, cl::CommandQueue &queue)
{
    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window);

    // Get initial windows
    Window slice = window.first_slice_window_3D();

    const auto src     = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC_0));
    const auto weights = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC_1));
    const auto biases  = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC_2));
    auto       dst     = utils::cast::polymorphic_downcast<ICLTensor *>(tensors.get_tensor(TensorType::ACL_DST));

    if(_data_layout == DataLayout::NHWC)
    {
        cl::Image2D weights_cl_image;

        const size_t dim_y_collapsed    = ceil_to_multiple(dst->info()->dimension(1) * dst->info()->dimension(2), slice.y().step());
        const bool   export_to_cl_image = export_to_cl_image_support(weights->info(), get_target(), _data_layout);

        slice.set(Window::DimY, Window::Dimension(0, dim_y_collapsed, slice.y().step()));
        slice.set(Window::DimZ, Window::Dimension(0, dst->info()->dimension(3), 1));

        if(export_to_cl_image)
        {
            const size_t      image_w = weights->info()->dimension(0) / 4;
            const size_t      image_h = weights->info()->dimension(1) * weights->info()->dimension(2) * weights->info()->dimension(3);
            const TensorShape shape2d(image_w, image_h);
            const size_t      image_row_pitch = weights->info()->strides_in_bytes()[1];

            // Export cl_buffer to cl_image
            weights_cl_image = create_image2d_from_buffer(CLKernelLibrary::get().context(), weights->cl_buffer(), shape2d, weights->info()->data_type(), image_row_pitch);
        }

        unsigned int idx = 0;
        add_4D_tensor_argument(idx, src, slice);
        add_4D_tensor_argument(idx, dst, slice);
        if(export_to_cl_image)
        {
            _kernel.setArg(idx++, weights_cl_image);
        }
        add_4D_tensor_argument(idx, weights, slice);
        if(biases != nullptr)
        {
            add_1D_tensor_argument(idx, biases, slice);
        }
        enqueue(queue, *this, slice, lws_hint());
    }
    else
    {
        Window win_in = window;

        win_in.adjust(Window::DimX, -_conv_info.pad_left(), true);
        win_in.adjust(Window::DimY, -_conv_info.pad_top(), true);

        const int width_idx  = get_data_layout_dimension_index(_data_layout, DataLayoutDimension::WIDTH);
        const int height_idx = get_data_layout_dimension_index(_data_layout, DataLayoutDimension::HEIGHT);

        const int conv_stride_x = std::get<0>(_conv_info.stride());
        const int conv_stride_y = std::get<1>(_conv_info.stride());

        win_in.set_dimension_step(width_idx, window[width_idx].step() * conv_stride_x);
        win_in.set_dimension_step(height_idx, window[height_idx].step() * conv_stride_y);

        Window       slice_in = win_in.first_slice_window_3D();
        unsigned int idx1     = 2 * num_arguments_per_3D_tensor();
        add_3D_tensor_argument(idx1, weights, slice);

        if(biases != nullptr)
        {
            Window slice_biases;
            slice_biases.use_tensor_dimensions(biases->info()->tensor_shape());
            add_1D_tensor_argument(idx1, biases, slice_biases);
        }

        _kernel.setArg(idx1++, static_cast<unsigned int>(weights->info()->strides_in_bytes()[3]));

        do
        {
            unsigned int idx = 0;
            add_3D_tensor_argument(idx, src, slice_in);
            add_3D_tensor_argument(idx, dst, slice);
            enqueue(queue, *this, slice, lws_hint());
        }
        while(window.slide_window_slice_3D(slice) && win_in.slide_window_slice_3D(slice_in));
    }
}
} // namespace kernels
} // namespace opencl
} // namespace arm_compute
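
For context, the sketch below shows how this kernel's validate()/configure()/run_op() entry points fit together. It is a minimal illustration only: the tensor shapes, the GPUTarget::G76 choice, the internal header path and the runtime wiring in the trailing comments are assumptions and are not part of this file; a real application would normally go through a higher-level operator such as opencl::ClDirectConv2d or the public CLDirectConvolutionLayer function rather than driving the kernel directly.

// Minimal, illustrative driver (assumed shapes/target; requires an initialised CL backend,
// e.g. via CLScheduler::get().default_init()).
#include "arm_compute/core/CL/CLKernelLibrary.h"
#include "arm_compute/core/Error.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Types.h"
#include "src/core/gpu/cl/kernels/ClDirectConv2dKernel.h" // internal header, path assumed for 21.11

using namespace arm_compute;

void configure_direct_conv2d_example()
{
    // NHWC F32 metadata: src 1x32x32x16, eight 3x3x16 filters, eight biases (illustrative values).
    TensorInfo src_info(TensorShape(16U, 32U, 32U, 1U), 1, DataType::F32);
    TensorInfo wei_info(TensorShape(16U, 3U, 3U, 8U), 1, DataType::F32);
    TensorInfo bia_info(TensorShape(8U), 1, DataType::F32);
    TensorInfo dst_info{}; // left empty: configure() auto-initialises it
    src_info.set_data_layout(DataLayout::NHWC);
    wei_info.set_data_layout(DataLayout::NHWC);

    const PadStrideInfo conv_info(1, 1, 1, 1); // stride 1x1, padding 1x1

    // Static validation first, then configuration for a given GPU target.
    ARM_COMPUTE_ERROR_THROW_ON(opencl::kernels::ClDirectConv2dKernel::validate(
        &src_info, &wei_info, &bia_info, &dst_info, conv_info, ActivationLayerInfo(), GPUTarget::G76));

    opencl::kernels::ClDirectConv2dKernel kernel;
    kernel.set_target(GPUTarget::G76);
    kernel.configure(CLKernelLibrary::get().get_compile_context(),
                     &src_info, &wei_info, &bia_info, &dst_info, conv_info, ActivationLayerInfo());

    // At run time, backing ICLTensors are passed through an ITensorPack using the same ids
    // that run_op() reads above (ACL_SRC_0/1/2, ACL_DST), roughly:
    //   ITensorPack pack;
    //   pack.add_const_tensor(TensorType::ACL_SRC_0, &src_tensor);
    //   pack.add_const_tensor(TensorType::ACL_SRC_1, &weights_tensor);
    //   pack.add_const_tensor(TensorType::ACL_SRC_2, &biases_tensor);
    //   pack.add_tensor(TensorType::ACL_DST, &dst_tensor);
    //   CLScheduler::get().enqueue_op(kernel, pack, true);
}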