Compute Library 21.02
CLDirectConvolutionLayerKernel.cpp
/*
 * Copyright (c) 2017-2021 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "arm_compute/core/Utils.h"
#include "src/core/CL/CLValidate.h"
#include "support/StringSupport.h"

namespace arm_compute
{
namespace
{
Status validate_arguments(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info)
{

    const DataLayout data_layout = input->data_layout();
    const int        width_idx   = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
    const int        height_idx  = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
    const int        channel_idx = get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL);

    ARM_COMPUTE_RETURN_ERROR_ON_MSG(weights->dimension(width_idx) != weights->dimension(height_idx), "Weights should have same width and height");
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(weights->dimension(channel_idx) != input->dimension(channel_idx),
                                    "Weights feature map dimension should match the respective input's one");
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(weights->num_dimensions() > 4, "Weights can be at most 4 dimensional");
    ARM_COMPUTE_RETURN_ERROR_ON_MSG((weights->dimension(width_idx) == 1) && std::get<0>(conv_info.stride()) > 3, "Strides larger than 3 not supported for 1x1 convolution.");
    ARM_COMPUTE_RETURN_ERROR_ON_MSG((weights->dimension(width_idx) == 3 || weights->dimension(width_idx) == 5 || weights->dimension(width_idx) == 9)
                                    && std::get<0>(conv_info.stride()) > 2,
                                    "Strides larger than 2 not supported for 3x3, 5x5, 9x9 convolution.");

    if(data_layout == DataLayout::NCHW)
    {
        if(is_data_type_quantized(input->data_type()))
        {
            ARM_COMPUTE_RETURN_ERROR_ON_MSG(weights->dimension(width_idx) != 1 && weights->dimension(width_idx) != 3 && weights->dimension(width_idx) != 5 && weights->dimension(width_idx) != 9,
                                            "Kernel sizes other than 1x1, 3x3, 5x5 or 9x9 are not supported with quantized data types");
        }
        else
        {
            ARM_COMPUTE_RETURN_ERROR_ON_MSG(weights->dimension(width_idx) != 1 && weights->dimension(width_idx) != 3 && weights->dimension(width_idx) != 5,
                                            "Kernel sizes other than 1x1, 3x3 or 5x5 are not supported with float data types");
        }
    }

    if(biases != nullptr)
    {
        if(is_data_type_quantized_asymmetric(input->data_type()))
        {
            ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(biases, 1, DataType::S32);
        }
        else
        {
            ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(weights, biases);
        }
        ARM_COMPUTE_RETURN_ERROR_ON_MSG(biases->dimension(0) != weights->dimension(3),
                                        "Biases size and number of input feature maps should match");
        ARM_COMPUTE_RETURN_ERROR_ON_MSG(biases->num_dimensions() > 1,
                                        "Biases should be one dimensional");
    }

    // Checks performed when output is configured
    if(output->total_size() != 0)
    {
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(output->tensor_shape(),
                                                           misc::shape_calculator::compute_deep_convolution_shape(*input, *weights, conv_info));
    }

    const auto data_type = input->data_type();
    if(is_data_type_quantized(data_type))
    {
        const UniformQuantizationInfo iqinfo = input->quantization_info().uniform();
        const UniformQuantizationInfo wqinfo = weights->quantization_info().uniform();
        const UniformQuantizationInfo oqinfo = output->quantization_info().uniform();

        float multiplier        = iqinfo.scale * wqinfo.scale / oqinfo.scale;
        int   output_multiplier = 0;
        int   output_shift      = 0;
        ARM_COMPUTE_RETURN_ON_ERROR(quantization::calculate_quantized_multiplier(multiplier, &output_multiplier, &output_shift));
    }
    return Status{};
}

inline bool can_run_optimized_kernel_for_bifrost_nchw(GPUTarget gpu_target, unsigned int conv_stride_x, unsigned int conv_stride_y, unsigned int kernel_size,
                                                      DataType data_type, DataLayout data_layout)
{
    return gpu_target_is_in(gpu_target,
                            GPUTarget::G71, GPUTarget::G72, GPUTarget::G76,
                            GPUTarget::G51, GPUTarget::G51BIG, GPUTarget::G51LIT,
                            GPUTarget::G52, GPUTarget::G52LIT)
           && (kernel_size <= 5)
           && (conv_stride_x == 1) && (conv_stride_y == 1)
           && (data_type == DataType::F32)
           && (data_layout == DataLayout::NCHW);
}

inline void setup_num_elems_nchw(unsigned int &num_elems_read_per_iteration_x, unsigned int &num_elems_read_per_iteration_y,
                                 unsigned int &num_elems_written_per_iteration_x, unsigned int &num_elems_written_per_iteration_y,
                                 unsigned int kernel_size, const PadStrideInfo &conv_info, const GPUTarget target, ITensorInfo *input)
{
    const DataType   data_type     = input->data_type();
    const DataLayout data_layout   = input->data_layout();
    unsigned int     conv_stride_x = std::get<0>(conv_info.stride());
    unsigned int     conv_stride_y = std::get<1>(conv_info.stride());

    const bool run_optimized_bifrost = can_run_optimized_kernel_for_bifrost_nchw(target, conv_stride_x, conv_stride_y, kernel_size, data_type, data_layout);

    if(run_optimized_bifrost)
    {
        // Configure kernel window
        switch(kernel_size)
        {
            case 1:
            {
                num_elems_read_per_iteration_x    = 4;
                num_elems_read_per_iteration_y    = 4;
                num_elems_written_per_iteration_x = 4;
                num_elems_written_per_iteration_y = 4;
                break;
            }
            case 3:
            {
                num_elems_read_per_iteration_x    = 6;
                num_elems_read_per_iteration_y    = 5;
                num_elems_written_per_iteration_x = 4;
                num_elems_written_per_iteration_y = 3;
                break;
            }
            case 5:
            {
                num_elems_read_per_iteration_x    = 8;
                num_elems_read_per_iteration_y    = 6;
                num_elems_written_per_iteration_x = 4;
                num_elems_written_per_iteration_y = 2;
                break;
            }
            default:
            {
                ARM_COMPUTE_ERROR("Kernel size not optimized for Bifrost");
            }
        }
    }
    else
    {
        num_elems_read_per_iteration_y    = kernel_size;
        num_elems_written_per_iteration_x = 8;
        num_elems_written_per_iteration_y = 1;
        switch(kernel_size)
        {
            case 1:
                switch(conv_stride_x)
                {
                    case 1:
                        num_elems_read_per_iteration_x = 8;
                        break;
                    case 2:
                        num_elems_read_per_iteration_x = 16;
                        break;
                    case 3:
                        switch(input->element_size())
                        {
                            case 1:
                                num_elems_read_per_iteration_x = 28;
                                break;
                            case 2:
                                num_elems_read_per_iteration_x = 24;
                                break;
                            case 4:
                                num_elems_read_per_iteration_x = 22;
                                break;
                            default:
                                ARM_COMPUTE_ERROR("Invalid data size");
                        }
                        break;
                    default:
                        ARM_COMPUTE_ERROR("Invalid convolution stride X");
                }
                break;
            case 3:
                switch(conv_stride_x)
                {
                    case 1:
                        num_elems_read_per_iteration_x = 10;
                        break;
                    case 2:
                        num_elems_read_per_iteration_x = 17;
                        break;
                    default:
                        ARM_COMPUTE_ERROR("Invalid convolution stride X");
                }
                break;
            case 5:
                switch(conv_stride_x)
                {
                    case 1:
                        num_elems_read_per_iteration_x = 12;
                        break;
                    case 2:
                        num_elems_read_per_iteration_x = 20;
                        break;
                    default:
                        ARM_COMPUTE_ERROR("Invalid convolution stride X");
                }
                break;
            case 9:
                switch(conv_stride_x)
                {
                    case 1:
                        num_elems_read_per_iteration_x = 16;
                        break;
                    case 2:
                        num_elems_read_per_iteration_x = 24;
                        break;
                    default:
                        ARM_COMPUTE_ERROR("Invalid convolution stride X");
                }
                break;
            default:
                ARM_COMPUTE_ERROR("Invalid direct convolution size");
        }
    }
}

std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input, ITensorInfo *weights, ITensorInfo *output, const PadStrideInfo &conv_info, const GPUTarget target)
{
    const DataLayout data_layout = input->data_layout();

    // Get output shape
    TensorShape output_shape = misc::shape_calculator::compute_deep_convolution_shape(*input, *weights, conv_info);

    // Output auto initialization if not yet initialized
    auto_init_if_empty(*output, output_shape,
                       1,
                       input->data_type(),
                       input->quantization_info());

    if(data_layout == DataLayout::NHWC)
    {
        const unsigned int vec_size = std::min(static_cast<unsigned int>(output->tensor_shape()[0]), 4u);

        // Create window and update padding
        Window win = calculate_max_window(*output, Steps(vec_size, 1U));
        output->set_valid_region(ValidRegion(Coordinates(), output->tensor_shape()));
        Status err = Status{};
        return std::make_pair(err, win);
    }
    else if(data_layout == DataLayout::NCHW)
    {
        const int          width_idx   = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
        const unsigned int kernel_size = weights->dimension(width_idx);

        unsigned int num_elems_read_per_iteration_x    = 0;
        unsigned int num_elems_read_per_iteration_y    = 0;
        unsigned int num_elems_written_per_iteration_x = 0;
        unsigned int num_elems_written_per_iteration_y = 0;

        unsigned int conv_pad_left = conv_info.pad_left();
        unsigned int conv_pad_top  = conv_info.pad_top();
        unsigned int conv_stride_x = std::get<0>(conv_info.stride());
        unsigned int conv_stride_y = std::get<1>(conv_info.stride());

        setup_num_elems_nchw(num_elems_read_per_iteration_x, num_elems_read_per_iteration_y,
                             num_elems_written_per_iteration_x, num_elems_written_per_iteration_y,
                             kernel_size, conv_info, target, input);

        // Create window and update padding
        bool   window_changed = false;
        Window win            = calculate_max_window(*output, Steps(num_elems_written_per_iteration_x, num_elems_written_per_iteration_y));

        AccessWindowRectangle input_access(input, -conv_pad_left, -conv_pad_top, num_elems_read_per_iteration_x, num_elems_read_per_iteration_y, conv_stride_x, conv_stride_y);
        AccessWindowStatic    weights_access(weights, 0, 0, kernel_size, kernel_size);
        AccessWindowRectangle output_access(output, 0, 0, num_elems_written_per_iteration_x, num_elems_written_per_iteration_y);
        window_changed = update_window_and_padding(win, input_access, weights_access, output_access);
        output_access.set_valid_region(win, ValidRegion(Coordinates(), output->tensor_shape()));
        Status err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{};
        return std::make_pair(err, win);
    }
    else
    {
        ARM_COMPUTE_ERROR("Not supported");
    }
}
} // namespace

CLDirectConvolutionLayerKernel::CLDirectConvolutionLayerKernel()
    : _input(nullptr), _biases(nullptr), _weights(nullptr), _output(nullptr), _data_layout(DataLayout::UNKNOWN), _border_size(0), _conv_stride_x(0), _conv_stride_y(0), _conv_info()
{
}

BorderSize CLDirectConvolutionLayerKernel::border_size() const
{
    return _border_size;
}

void CLDirectConvolutionLayerKernel::configure(const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info)
{
    configure(CLKernelLibrary::get().get_compile_context(), input, weights, biases, output, conv_info);
}

void CLDirectConvolutionLayerKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output,
                                               const PadStrideInfo &conv_info)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(input, weights, output);

    // Perform validation
    ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(),
                                                  weights->info(),
                                                  (biases != nullptr) ? biases->info() : nullptr,
                                                  output->info(),
                                                  conv_info));

    _conv_stride_x = std::get<0>(conv_info.stride());
    _conv_stride_y = std::get<1>(conv_info.stride());
    _data_layout   = input->info()->data_layout();
    _input         = input;
    _weights       = weights;
    _output        = output;
    _biases        = biases;
    _conv_info     = conv_info;

    const unsigned int width_idx   = get_data_layout_dimension_index(_data_layout, DataLayoutDimension::WIDTH);
    const unsigned int height_idx  = get_data_layout_dimension_index(_data_layout, DataLayoutDimension::HEIGHT);
    const unsigned int channel_idx = get_data_layout_dimension_index(_data_layout, DataLayoutDimension::CHANNEL);
    const unsigned int kernel_size = weights->info()->dimension(width_idx);
    const DataType     data_type   = input->info()->data_type();

    const GPUTarget gpu_target = get_target();

    // Configure kernel window
    auto win_config = validate_and_configure_window(input->info(), weights->info(), output->info(), conv_info, gpu_target);
    ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
    ICLKernel::configure_internal(win_config.second);

    std::stringstream kernel_name;
    CLBuildOptions    build_options;

    if(_data_layout == DataLayout::NHWC)
    {

        kernel_name << "direct_convolution_nhwc";

        const unsigned int n0               = win_config.second.x().step();
        const unsigned int m0               = win_config.second.y().step();
        const unsigned int k0               = adjust_vec_size(16u, _input->info()->dimension(channel_idx));
        const unsigned int partial_store_n0 = _output->info()->dimension(channel_idx) % n0;
        const unsigned int partial_store_m0 = (_output->info()->dimension(width_idx) * _output->info()->dimension(height_idx)) % m0;
        const unsigned int pad_left         = conv_info.pad_left();
        const unsigned int pad_top          = conv_info.pad_top();

        if(_biases != nullptr)
        {
            build_options.add_option(std::string("-DHAS_BIAS"));
            build_options.add_option(std::string("-DBIA_DATA_TYPE=" + get_cl_type_from_data_type(_biases->info()->data_type())));
        }
        build_options.add_option("-DSRC_WIDTH=" + support::cpp11::to_string(_input->info()->dimension(width_idx)));
        build_options.add_option("-DSRC_HEIGHT=" + support::cpp11::to_string(_input->info()->dimension(height_idx)));
        build_options.add_option("-DSRC_CHANNELS=" + support::cpp11::to_string(_input->info()->dimension(channel_idx)));
        build_options.add_option("-DSRC_DATA_TYPE=" + get_cl_type_from_data_type(_input->info()->data_type()));
        build_options.add_option("-DDST_WIDTH=" + support::cpp11::to_string(_output->info()->dimension(width_idx)));
        build_options.add_option("-DDST_HEIGHT=" + support::cpp11::to_string(_output->info()->dimension(height_idx)));
        build_options.add_option("-DDST_CHANNELS=" + support::cpp11::to_string(_output->info()->dimension(channel_idx)));
        build_options.add_option("-DDST_DATA_TYPE=" + get_cl_type_from_data_type(_output->info()->data_type()));
        build_options.add_option("-DWEI_WIDTH=" + support::cpp11::to_string(_weights->info()->dimension(width_idx)));
        build_options.add_option("-DWEI_HEIGHT=" + support::cpp11::to_string(_weights->info()->dimension(height_idx)));
        build_options.add_option("-DWEI_DATA_TYPE=" + get_cl_type_from_data_type(_weights->info()->data_type()));
        build_options.add_option("-DSTRIDE_X=" + support::cpp11::to_string(_conv_stride_x));
        build_options.add_option("-DSTRIDE_Y=" + support::cpp11::to_string(_conv_stride_y));
        build_options.add_option("-DPAD_LEFT=" + support::cpp11::to_string(pad_left));
        build_options.add_option("-DPAD_TOP=" + support::cpp11::to_string(pad_top));
        build_options.add_option("-DN0=" + support::cpp11::to_string(n0));
        build_options.add_option("-DM0=" + support::cpp11::to_string(m0));
        build_options.add_option("-DK0=" + support::cpp11::to_string(k0));
        build_options.add_option("-DPARTIAL_STORE_N0=" + support::cpp11::to_string(partial_store_n0));
        build_options.add_option("-DPARTIAL_STORE_M0=" + support::cpp11::to_string(partial_store_m0));

        if(is_data_type_quantized(data_type))
        {
            const UniformQuantizationInfo iqinfo = _input->info()->quantization_info().uniform();
            const UniformQuantizationInfo wqinfo = _weights->info()->quantization_info().uniform();
            const UniformQuantizationInfo oqinfo = _output->info()->quantization_info().uniform();

            PixelValue zero_value = PixelValue(0, input->info()->data_type(), input->info()->quantization_info());
            int        zero_value_s32;
            zero_value.get(zero_value_s32);

            float multiplier        = iqinfo.scale * wqinfo.scale / oqinfo.scale;
            int   output_multiplier = 0;
            int   output_shift      = 0;
            quantization::calculate_quantized_multiplier(multiplier, &output_multiplier, &output_shift);
            build_options.add_option("-DIS_QUANTIZED");
            build_options.add_option("-DDST_MULTIPLIER=" + support::cpp11::to_string(output_multiplier));
            build_options.add_option("-DDST_SHIFT=" + support::cpp11::to_string(output_shift));
            build_options.add_option("-DSRC_OFFSET=" + support::cpp11::to_string(-iqinfo.offset));
            build_options.add_option("-DWEI_OFFSET=" + support::cpp11::to_string(-wqinfo.offset));
            build_options.add_option("-DDST_OFFSET=" + support::cpp11::to_string(oqinfo.offset));
            build_options.add_option("-DZERO_VALUE=" + support::cpp11::to_string(zero_value_s32));
            build_options.add_option("-DACC_DATA_TYPE=" + get_cl_type_from_data_type(DataType::S32));
        }
        else
        {
            build_options.add_option("-DACC_DATA_TYPE=" + get_cl_type_from_data_type(data_type));
            build_options.add_option("-DSRC_OFFSET=" + support::cpp11::to_string(0));
            build_options.add_option("-DWEI_OFFSET=" + support::cpp11::to_string(0));
            build_options.add_option("-DDST_OFFSET=" + support::cpp11::to_string(0));
        }
    }
    else
    {
        _border_size = BorderSize(_input->info()->padding());

        kernel_name << "direct_convolution" << kernel_size << "x" << kernel_size;

        build_options.add_option_if(_biases != nullptr, std::string("-DHAS_BIAS"));

        const bool run_optimized_for_bifrost = can_run_optimized_kernel_for_bifrost_nchw(gpu_target, _conv_stride_x, _conv_stride_y, kernel_size, data_type, _data_layout);

        if(run_optimized_for_bifrost)
        {
            build_options.add_option(std::string("-DWEIGHTS_DEPTH=" + support::cpp11::to_string(_weights->info()->dimension(channel_idx))));

            kernel_name << "_f32_bifrost";
        }
        else
        {
            build_options.add_option(std::string("-DDATA_TYPE=" + get_cl_type_from_data_type(data_type)));
            build_options.add_option(std::string("-DDATA_SIZE=" + get_data_size_from_data_type(data_type)));
            build_options.add_option(std::string("-DWEIGHTS_DEPTH=" + support::cpp11::to_string(_weights->info()->dimension(channel_idx))));
            build_options.add_option(std::string("-DSTRIDE_X=" + support::cpp11::to_string(_conv_stride_x)));
            build_options.add_option(std::string("-DDATA_TYPE_PROMOTED=" + get_cl_type_from_data_type(data_type)));

            if(is_data_type_quantized(data_type))
            {
                const UniformQuantizationInfo iqinfo = _input->info()->quantization_info().uniform();
                const UniformQuantizationInfo wqinfo = _weights->info()->quantization_info().uniform();
                const UniformQuantizationInfo oqinfo = _output->info()->quantization_info().uniform();

                float multiplier        = iqinfo.scale * wqinfo.scale / oqinfo.scale;
                int   output_multiplier = 0;
                int   output_shift      = 0;
                quantization::calculate_quantized_multiplier(multiplier, &output_multiplier, &output_shift);
                build_options.add_option("-DOUTPUT_MULTIPLIER=" + support::cpp11::to_string(output_multiplier));
                build_options.add_option("-DOUTPUT_SHIFT=" + support::cpp11::to_string(output_shift));
                build_options.add_option("-DKERNEL_SIZE=" + support::cpp11::to_string(kernel_size));
                build_options.add_option("-DINPUT_OFFSET=" + support::cpp11::to_string(-iqinfo.offset));
                build_options.add_option("-DWEIGHTS_OFFSET=" + support::cpp11::to_string(-wqinfo.offset));
                build_options.add_option("-DOUTPUT_OFFSET=" + support::cpp11::to_string(oqinfo.offset));

                kernel_name.str("direct_convolution_quantized");
            }
        }
    }

    _kernel = create_kernel(compile_context, kernel_name.str(), build_options.options());

    // Set config_id for enabling LWS tuning
    _config_id = kernel_name.str();
    _config_id += "_";
    _config_id += lower_string(string_from_data_type(data_type));
    _config_id += "_";
    _config_id += support::cpp11::to_string(kernel_size);
    _config_id += "_";
    _config_id += support::cpp11::to_string(border_size().left);
    _config_id += "_";
    _config_id += support::cpp11::to_string(border_size().top);
    _config_id += "_";
    _config_id += support::cpp11::to_string(border_size().right);
    _config_id += "_";
    _config_id += support::cpp11::to_string(border_size().bottom);
    _config_id += "_";
    _config_id += support::cpp11::to_string(_conv_stride_x);
    _config_id += "_";
    _config_id += support::cpp11::to_string(_conv_stride_y);
    _config_id += "_";
    _config_id += support::cpp11::to_string(output->info()->dimension(width_idx));
    _config_id += "_";
    _config_id += support::cpp11::to_string(output->info()->dimension(height_idx));
    _config_id += "_";
    _config_id += lower_string(string_from_data_layout(_data_layout));
}

Status CLDirectConvolutionLayerKernel::validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info,
                                                const GPUTarget target)
{
    ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, weights, biases, output, conv_info));
    ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(input->clone().get(), weights->clone().get(), output->clone().get(), conv_info, target).first);

    return Status{};
}

void CLDirectConvolutionLayerKernel::run(const Window &window, cl::CommandQueue &queue)
{
    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(IKernel::window(), window);

    // Get initial windows
    Window slice = window.first_slice_window_3D();

    if(_data_layout == DataLayout::NHWC)
    {
        slice.set(Window::DimZ, Window::Dimension(0, _output->info()->dimension(3), 1));

        unsigned int idx = 0;
        add_3D_tensor_argument(idx, _input, slice);
        add_3D_tensor_argument(idx, _output, slice);
        add_3D_tensor_argument(idx, _weights, slice);
        if(_biases != nullptr)
        {
            add_1D_tensor_argument(idx, _biases, slice);
        }
        _kernel.setArg(idx++, static_cast<unsigned int>(_weights->info()->strides_in_bytes()[3]));
        enqueue(queue, *this, slice, lws_hint());
    }
    else
    {
        Window win_in = window;

        win_in.adjust(Window::DimX, -_conv_info.pad_left(), true);
        win_in.adjust(Window::DimY, -_conv_info.pad_top(), true);

        const unsigned int width_idx  = get_data_layout_dimension_index(_data_layout, DataLayoutDimension::WIDTH);
        const unsigned int height_idx = get_data_layout_dimension_index(_data_layout, DataLayoutDimension::HEIGHT);
        win_in.set_dimension_step(width_idx, window[width_idx].step() * _conv_stride_x);
        win_in.set_dimension_step(height_idx, window[height_idx].step() * _conv_stride_y);

        Window       slice_in = win_in.first_slice_window_3D();
        unsigned int idx1     = 2 * num_arguments_per_3D_tensor();
        add_3D_tensor_argument(idx1, _weights, slice);

        if(_biases != nullptr)
        {
            Window slice_biases;
            slice_biases.use_tensor_dimensions(_biases->info()->tensor_shape());
            add_1D_tensor_argument(idx1, _biases, slice_biases);
        }

        _kernel.setArg(idx1++, static_cast<unsigned int>(_weights->info()->strides_in_bytes()[3]));

        do
        {
            unsigned int idx = 0;
            add_3D_tensor_argument(idx, _input, slice_in);
            add_3D_tensor_argument(idx, _output, slice);
            enqueue(queue, *this, slice, lws_hint());
        }
        while(window.slide_window_slice_3D(slice) && win_in.slide_window_slice_3D(slice_in));
    }
}
} // namespace arm_compute
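
For reference, the sketch below shows roughly how this kernel's public entry points (the static validate(), configure() and run() via the scheduler, as seen above) fit together. It is an illustrative assumption rather than part of the library's documentation: the include path, the tensor shapes and the direct use of the internal kernel class (instead of the CLDirectConvolutionLayer runtime function, which additionally handles border filling for the NCHW path) are chosen only for this example.

// Minimal usage sketch; assumptions: internal header path, NCHW F32 shapes, default CL context.
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "arm_compute/runtime/CL/CLTensor.h"
#include "src/core/CL/kernels/CLDirectConvolutionLayerKernel.h" // internal header, path assumed

using namespace arm_compute;

int main()
{
    CLScheduler::get().default_init();

    // 32x32 input with 16 channels, 3x3 weights producing 8 output feature maps (NCHW, F32).
    CLTensor src, weights, biases, dst;
    src.allocator()->init(TensorInfo(TensorShape(32U, 32U, 16U), 1, DataType::F32));
    weights.allocator()->init(TensorInfo(TensorShape(3U, 3U, 16U, 8U), 1, DataType::F32));
    biases.allocator()->init(TensorInfo(TensorShape(8U), 1, DataType::F32));
    dst.allocator()->init(TensorInfo(TensorShape(32U, 32U, 8U), 1, DataType::F32));

    const PadStrideInfo conv_info(1, 1, 1, 1); // stride 1, pad 1: "same" spatial size

    // Mirrors validate_arguments() and validate_and_configure_window() above.
    ARM_COMPUTE_ERROR_THROW_ON(CLDirectConvolutionLayerKernel::validate(src.info(), weights.info(), biases.info(), dst.info(),
                                                                        conv_info, CLScheduler::get().target()));

    CLDirectConvolutionLayerKernel kernel;
    kernel.set_target(CLScheduler::get().target());
    kernel.configure(&src, &weights, &biases, &dst, conv_info);

    src.allocator()->allocate();
    weights.allocator()->allocate();
    biases.allocator()->allocate();
    dst.allocator()->allocate();
    // ... map the tensors and fill src/weights/biases here ...

    CLScheduler::get().enqueue(kernel); // runs the kernel over the window configured above
    CLScheduler::get().sync();
    return 0;
}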