Compute Library 21.02 - CLDepthwiseConvolutionLayer.cpp
/*
 * Copyright (c) 2017-2020 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/runtime/CL/functions/CLDepthwiseConvolutionLayer.h"

#include "arm_compute/core/CL/CLHelpers.h"
#include "arm_compute/core/CL/ICLTensor.h"
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/PixelValue.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "arm_compute/core/utils/quantization/AsymmHelpers.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NCHWKernel.h"
#include "src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NHWCKernel.h"
#include "src/core/CL/kernels/CLDepthwiseConvolutionLayerNativeKernel.h"
#include "src/core/CL/kernels/CLDepthwiseConvolutionLayerReshapeWeightsKernel.h"
#include "src/core/CL/kernels/CLFillBorderKernel.h"

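// Typical usage of this function (illustrative sketch only; the tensors, their
// allocation and the surrounding CL context are assumed to exist and are not
// part of this file):
//
//   CLDepthwiseConvolutionLayer dwc;
//   dwc.configure(&src, &weights, &biases, &dst,
//                 PadStrideInfo(1, 1, 1, 1),  // stride 1, padding 1
//                 1,                          // depth multiplier
//                 ActivationLayerInfo(),      // no fused activation
//                 Size2D(1U, 1U));            // no dilation
//   dwc.run();
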
namespace arm_compute
{
using namespace arm_compute::misc;
using namespace arm_compute::misc::shape_calculator;

namespace
{
Status validate_arguments_3x3(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info,
                              unsigned int depth_multiplier, ActivationLayerInfo act_info, GPUTarget gpu_target, const Size2D &dilation)
{
    // This function should be removed and incorporated inside CLDepthwiseConvolutionLayerInternal3x3 once CLDepthwiseConvolutionLayer3x3 is properly removed
    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, weights, output);
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::F16, DataType::F32);
    ARM_COMPUTE_RETURN_ERROR_ON(input->data_layout() == DataLayout::UNKNOWN);

    const bool is_quantized           = is_data_type_quantized_asymmetric(input->data_type());
    const bool is_nhwc                = input->data_layout() == DataLayout::NHWC;
    const bool needs_permute          = is_nhwc && (depth_multiplier > 1);
    const bool needs_weights_reshape  = is_nhwc && (depth_multiplier == 1) && is_quantized;
    const bool is_stride_1            = ((conv_info.stride().first == conv_info.stride().second) && (conv_info.stride().first == 1));
    const bool is_stride_1_dilation_1 = (is_stride_1 && dilation.x() == 1 && dilation.y() == 1);
    const bool is_dot8_supported      = dot8_supported(CLKernelLibrary::get().get_device());
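    // The 3x3 NHWC kernel consumes weights packed in blocks of c0 = 4 channel values;
    // the blocks are additionally transposed when the dot8 (cl_arm_integer_dot_product_int8)
    // fast path applies, which requires unit stride and no dilation.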
    DepthwiseConvolutionReshapeInfo info;
    info.c0        = 4;
    info.transpose = is_stride_1_dilation_1 && is_dot8_supported;

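    // Placeholder info for the requantization parameters: per-tensor quantization needs a
    // single S32 multiplier/shift pair, per-channel one pair per output channel (set below).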
    TensorInfo output_multipliers_shifts_info(TensorInfo(TensorShape(1U), 1, DataType::S32));
    if(is_quantized)
    {
        if(is_data_type_quantized_per_channel(weights->data_type()))
        {
            ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(weights, 1, DataType::QSYMM8_PER_CHANNEL);

            const size_t idx_c = get_data_layout_dimension_index(weights->data_layout(), DataLayoutDimension::CHANNEL);
            output_multipliers_shifts_info.set_tensor_shape(TensorShape(weights->dimension(idx_c)));
        }
        else
        {
            ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, weights);
        }
    }

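    // With depth_multiplier > 1 on NHWC input, validation mirrors the configure() path:
    // the tensors are permuted to NCHW and checked against the NCHW 3x3 kernel.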
    if(needs_permute)
    {
        TensorShape permuted_input_shape   = input->tensor_shape();
        TensorShape permuted_weights_shape = weights->tensor_shape();
        TensorShape permuted_output_shape  = shape_calculator::compute_depthwise_convolution_shape(*input, *weights, conv_info, depth_multiplier, dilation);

        permute(permuted_input_shape, PermutationVector(1U, 2U, 0U));
        permute(permuted_weights_shape, PermutationVector(1U, 2U, 0U));
        permute(permuted_output_shape, PermutationVector(1U, 2U, 0U));

        const TensorInfo permuted_input   = input->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(permuted_input_shape).set_data_layout(DataLayout::NCHW);
        const TensorInfo permuted_weights = weights->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(permuted_weights_shape).set_data_layout(DataLayout::NCHW);
        const TensorInfo permuted_output  = output->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(permuted_output_shape).set_data_layout(DataLayout::NCHW);

        ARM_COMPUTE_RETURN_ON_ERROR(CLDepthwiseConvolutionLayer3x3NCHWKernel::validate(&permuted_input, &permuted_weights, biases, &permuted_output,
                                                                                       conv_info, depth_multiplier, act_info, gpu_target,
                                                                                       dilation, &output_multipliers_shifts_info, &output_multipliers_shifts_info));
    }
    else if(is_nhwc)
    {
        if(needs_weights_reshape)
        {
            auto reshaped_weights_shape = arm_compute::misc::shape_calculator::compute_reshaped_depthwise_weights_shape(*weights, info);
            ARM_COMPUTE_RETURN_ON_ERROR(CLDepthwiseConvolutionLayer3x3NHWCKernel::validate(input, &weights->clone()->set_tensor_shape(reshaped_weights_shape), biases,
                                                                                           output, conv_info, depth_multiplier, act_info,
                                                                                           dilation, &output_multipliers_shifts_info, &output_multipliers_shifts_info));
        }
        else
        {
            ARM_COMPUTE_RETURN_ON_ERROR(CLDepthwiseConvolutionLayer3x3NHWCKernel::validate(input, weights, biases, output, conv_info, depth_multiplier, act_info,
                                                                                           dilation, &output_multipliers_shifts_info, &output_multipliers_shifts_info));
        }
    }
    else
    {
        ARM_COMPUTE_RETURN_ON_ERROR(CLDepthwiseConvolutionLayer3x3NCHWKernel::validate(input, weights, biases, output, conv_info, depth_multiplier, act_info, gpu_target,
                                                                                       dilation, &output_multipliers_shifts_info, &output_multipliers_shifts_info));
    }
    return Status{};
}
} // namespace

CLDepthwiseConvolutionLayer::CLDepthwiseConvolutionLayerGeneric::CLDepthwiseConvolutionLayerGeneric(std::shared_ptr<IMemoryManager> memory_manager)
    : _memory_group(std::move(memory_manager)),
      _dwc_native_kernel(std::make_unique<CLDepthwiseConvolutionLayerNativeKernel>()),
      _permute_input_to_nhwc(),
      _permute_weights_to_nhwc(),
      _permute_output_to_nchw(),
      _permuted_input(),
      _permuted_weights(),
      _permuted_output(),
      _output_multipliers(),
      _output_shifts(),
      _original_weights(),
      _input(),
      _output(),
      _needs_permute(false),
      _is_prepared(false),
      _is_quantized(false)
{
}

CLDepthwiseConvolutionLayer::~CLDepthwiseConvolutionLayer() = default;

void CLDepthwiseConvolutionLayer::CLDepthwiseConvolutionLayerGeneric::configure(ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info,
                                                                                unsigned int depth_multiplier, const ActivationLayerInfo &act_info, const Size2D &dilation)
{
    configure(CLKernelLibrary::get().get_compile_context(), input, weights, biases, output, conv_info, depth_multiplier, act_info, dilation);
}

void CLDepthwiseConvolutionLayer::CLDepthwiseConvolutionLayerGeneric::configure(const CLCompileContext &compile_context, ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases,
                                                                                ICLTensor *output, const PadStrideInfo &conv_info,
                                                                                unsigned int depth_multiplier, const ActivationLayerInfo &act_info, const Size2D &dilation)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(input, weights, output);
    ARM_COMPUTE_ERROR_THROW_ON(CLDepthwiseConvolutionLayerGeneric::validate(input->info(),
                                                                            weights->info(),
                                                                            biases != nullptr ? biases->info() : nullptr,
                                                                            output->info(),
                                                                            conv_info,
                                                                            depth_multiplier,
                                                                            act_info,
                                                                            dilation));

    _is_quantized     = is_data_type_quantized(input->info()->data_type());
    _is_prepared      = false;
    _original_weights = weights;
    _input            = input;
    _output           = output;
    _needs_permute    = input->info()->data_layout() == DataLayout::NCHW;

    ICLTensor       *input_to_use   = input;
    const ICLTensor *weights_to_use = weights;
    ICLTensor       *output_to_use  = output;
    if(_needs_permute)
    {
        _memory_group.manage(&_permuted_input);
        _memory_group.manage(&_permuted_output);

        // Configure the function to transform the input tensor from NCHW -> NHWC
        _permute_input_to_nhwc.configure(compile_context, input, &_permuted_input, PermutationVector(2U, 0U, 1U));
        _permuted_input.info()->set_data_layout(DataLayout::NHWC);

        // Configure the function to transform the weights tensor from IHW -> HWI
        _permute_weights_to_nhwc.configure(compile_context, weights, &_permuted_weights, PermutationVector(2U, 0U, 1U));
        _permuted_weights.info()->set_data_layout(DataLayout::NHWC);

        // Set output quantization info before dwc kernel configure
        _permuted_output.info()->set_quantization_info(output->info()->quantization_info());

        input_to_use   = &_permuted_input;
        weights_to_use = &_permuted_weights;
        output_to_use  = &_permuted_output;
    }

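    // Host-side tensors for the requantization parameters: one multiplier/shift pair per
    // output channel for per-channel quantized weights, a single pair otherwise. The
    // values themselves are computed on the host in prepare().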
    CLTensor *output_multipliers_to_use = nullptr;
    CLTensor *output_shifts_to_use      = nullptr;
    if(_is_quantized)
    {
        const size_t idx_c       = get_data_layout_dimension_index(weights->info()->data_layout(), DataLayoutDimension::CHANNEL);
        const size_t num_filters = (is_data_type_quantized_per_channel(weights->info()->data_type())) ? weights->info()->dimension(idx_c) : 1;

        _output_multipliers.allocator()->init(TensorInfo(TensorShape(num_filters), 1, DataType::S32));
        _output_shifts.allocator()->init(TensorInfo(TensorShape(num_filters), 1, DataType::S32));

        output_multipliers_to_use = &_output_multipliers;
        output_shifts_to_use      = &_output_shifts;
    }

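    // n0 is the number of output channels processed per work-item by the native kernel;
    // it falls back to 1 when depth_multiplier > 1, where channel vectorization does not apply.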
    DWCWeightsKernelInfo dwc_weights_info;
    dwc_weights_info.n0 = (depth_multiplier == 1) ? 8 : 1;
    DWCKernelInfo dwc_info;
    dwc_info.activation_info = act_info;
    _dwc_native_kernel->configure(compile_context, input_to_use, weights_to_use, biases, output_to_use,
                                  dwc_weights_info, dwc_info, conv_info, depth_multiplier, dilation,
                                  output_multipliers_to_use, output_shifts_to_use);

    if(_needs_permute)
    {
        _permuted_input.allocator()->allocate();

        // Configure the function to transform the convolved output to NCHW format
        _permuted_output.info()->set_data_layout(DataLayout::NCHW);
        _permute_output_to_nchw.configure(compile_context, &_permuted_output, output, PermutationVector(1U, 2U, 0U));
        _permuted_output.allocator()->allocate();
    }

    if(_is_quantized)
    {
        _output_multipliers.allocator()->allocate();
        _output_shifts.allocator()->allocate();
    }
}

Status CLDepthwiseConvolutionLayer::CLDepthwiseConvolutionLayerGeneric::validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output,
                                                                                 const PadStrideInfo &conv_info,
                                                                                 unsigned int depth_multiplier, const ActivationLayerInfo &act_info, const Size2D &dilation)
{
    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_LAYOUT(input, weights);
    const size_t idx_w = get_data_layout_dimension_index(input->data_layout(), DataLayoutDimension::WIDTH);
    const size_t idx_h = get_data_layout_dimension_index(input->data_layout(), DataLayoutDimension::HEIGHT);

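    // The dilated kernel extent, (k - 1) * dilation + 1 per axis, must fit inside the padded input.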
    ARM_COMPUTE_RETURN_ERROR_ON(weights->dimension(idx_w) + (weights->dimension(idx_w) - 1) * (dilation.x() - 1) > input->dimension(idx_w) + conv_info.pad_left() + conv_info.pad_right());
    ARM_COMPUTE_RETURN_ERROR_ON(weights->dimension(idx_h) + (weights->dimension(idx_h) - 1) * (dilation.y() - 1) > input->dimension(idx_h) + conv_info.pad_top() + conv_info.pad_bottom());

    DWCWeightsKernelInfo dwc_weights_info;
    dwc_weights_info.n0 = (depth_multiplier == 1) ? 8 : 1;
    DWCKernelInfo dwc_info;
    dwc_info.activation_info = act_info;

    const bool needs_permute = input->data_layout() == DataLayout::NCHW;

    const bool is_quantized = is_data_type_quantized(input->data_type());

    TensorInfo output_multipliers_shifts_info(TensorInfo(TensorShape(1U), 1, DataType::S32));
    if(is_quantized)
    {
        if(is_data_type_quantized_per_channel(weights->data_type()))
        {
            ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(weights, 1, DataType::QSYMM8_PER_CHANNEL);

            const size_t idx_c = get_data_layout_dimension_index(weights->data_layout(), DataLayoutDimension::CHANNEL);
            output_multipliers_shifts_info.set_tensor_shape(TensorShape(weights->dimension(idx_c)));
        }
        else
        {
            ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, weights);
        }
    }

    if(needs_permute)
    {
        TensorShape permuted_input_shape   = input->tensor_shape();
        TensorShape permuted_weights_shape = weights->tensor_shape();
        TensorShape permuted_output_shape  = shape_calculator::compute_depthwise_convolution_shape(*input, *weights, conv_info, depth_multiplier, dilation);

        permute(permuted_input_shape, PermutationVector(2U, 0U, 1U));
        permute(permuted_weights_shape, PermutationVector(2U, 0U, 1U));
        permute(permuted_output_shape, PermutationVector(2U, 0U, 1U));

        const TensorInfo permuted_input   = input->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(permuted_input_shape).set_data_layout(DataLayout::NHWC);
        const TensorInfo permuted_weights = weights->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(permuted_weights_shape).set_data_layout(DataLayout::NHWC);
        const TensorInfo permuted_output  = output->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(permuted_output_shape).set_data_layout(DataLayout::NHWC);

        ARM_COMPUTE_RETURN_ON_ERROR(CLPermute::validate(input, &permuted_input, PermutationVector(2U, 0U, 1U)));
        ARM_COMPUTE_RETURN_ON_ERROR(CLPermute::validate(weights, &permuted_weights, PermutationVector(2U, 0U, 1U)));
        ARM_COMPUTE_RETURN_ON_ERROR(CLDepthwiseConvolutionLayerNativeKernel::validate(&permuted_input, &permuted_weights, biases, &permuted_output, dwc_weights_info,
                                                                                      dwc_info, conv_info, depth_multiplier, dilation,
                                                                                      &output_multipliers_shifts_info, &output_multipliers_shifts_info));
        ARM_COMPUTE_RETURN_ON_ERROR(CLPermute::validate(&permuted_output, output, PermutationVector(1U, 2U, 0U)));
    }
    else
    {
        ARM_COMPUTE_RETURN_ON_ERROR(CLDepthwiseConvolutionLayerNativeKernel::validate(input, weights, biases, output, dwc_weights_info, dwc_info, conv_info, depth_multiplier,
                                                                                      dilation, &output_multipliers_shifts_info, &output_multipliers_shifts_info));
    }
    return Status{};
}

void CLDepthwiseConvolutionLayer::CLDepthwiseConvolutionLayerGeneric::run()
{
    prepare();

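    // Acquire the auxiliary tensors managed by the memory group for the duration of run()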
    MemoryGroupResourceScope scope_mg(_memory_group);

    if(_needs_permute)
    {
        _permute_input_to_nhwc.run();
    }
    CLScheduler::get().enqueue(*_dwc_native_kernel);
    if(_needs_permute)
    {
        _permute_output_to_nchw.run();
    }
}

void CLDepthwiseConvolutionLayer::CLDepthwiseConvolutionLayerGeneric::prepare()
{
    if(!_is_prepared)
    {
        if(_is_quantized)
        {
            _output_multipliers.map();
            _output_shifts.map();
            const unsigned int idx_ofms = get_data_layout_dimension_index(_output->info()->data_layout(), DataLayoutDimension::CHANNEL);
            quantization::compute_quantized_multipliers_and_shifts(_input->info(),
                                                                   _original_weights->info(),
                                                                   _output->info(),
                                                                   idx_ofms,
                                                                   reinterpret_cast<int32_t *>(_output_multipliers.ptr_to_element(Coordinates(0))),
                                                                   reinterpret_cast<int32_t *>(_output_shifts.ptr_to_element(Coordinates(0))));
            _output_multipliers.unmap();
            _output_shifts.unmap();
        }

        if(_needs_permute)
        {
            ARM_COMPUTE_ERROR_ON(!_original_weights->is_used());

            _permuted_weights.allocator()->allocate();
            _permute_weights_to_nhwc.run();
            _original_weights->mark_as_unused();
        }
        _is_prepared = true;
    }
}

CLDepthwiseConvolutionLayer::CLDepthwiseConvolutionLayerInternal3x3::CLDepthwiseConvolutionLayerInternal3x3(std::shared_ptr<IMemoryManager> memory_manager)
    : _memory_group(std::move(memory_manager)),
      _kernel(nullptr),
      _border_handler(std::make_unique<CLFillBorderKernel>()),
      _permute_input_to_nchw(),
      _permute_weights_to_nchw(),
      _permute_output_to_nhwc(),
      _reshape_weights(std::make_unique<CLDepthwiseConvolutionLayerReshapeWeightsKernel>()),
      _permuted_input(),
      _permuted_weights(),
      _permuted_output(),
      _output_multipliers(),
      _output_shifts(),
      _original_weights(nullptr),
      _input(nullptr),
      _output(nullptr),
      _needs_permute(false),
      _needs_weights_reshape(false),
      _is_prepared(false),
      _is_quantized(false)
{
}

void CLDepthwiseConvolutionLayer::CLDepthwiseConvolutionLayerInternal3x3::configure(ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output,
                                                                                    const PadStrideInfo &conv_info, unsigned int depth_multiplier, ActivationLayerInfo act_info, const Size2D &dilation)
{
    configure(CLKernelLibrary::get().get_compile_context(), input, weights, biases, output, conv_info, depth_multiplier, act_info, dilation);
}

void CLDepthwiseConvolutionLayer::CLDepthwiseConvolutionLayerInternal3x3::configure(const CLCompileContext &compile_context, ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases,
                                                                                    ICLTensor *output,
                                                                                    const PadStrideInfo &conv_info, unsigned int depth_multiplier, ActivationLayerInfo act_info, const Size2D &dilation)
{
    const GPUTarget gpu_target = CLScheduler::get().target();

    // Perform validation step
    ARM_COMPUTE_ERROR_ON_NULLPTR(input, weights, output);
    ARM_COMPUTE_ERROR_THROW_ON(CLDepthwiseConvolutionLayerInternal3x3::validate(input->info(),
                                                                                weights->info(),
                                                                                biases != nullptr ? biases->info() : nullptr,
                                                                                output->info(),
                                                                                conv_info,
                                                                                depth_multiplier,
                                                                                act_info,
                                                                                gpu_target,
                                                                                dilation));

    const bool is_nhwc     = input->info()->data_layout() == DataLayout::NHWC;
    _is_quantized          = is_data_type_quantized_asymmetric(input->info()->data_type());
    _needs_permute         = is_nhwc && (depth_multiplier > 1);
    _needs_weights_reshape = is_nhwc && (depth_multiplier == 1) && _is_quantized;

    _is_prepared      = false;
    _original_weights = weights;
    _input            = input;
    _output           = output;

    ICLTensor       *input_to_use   = input;
    const ICLTensor *weights_to_use = weights;
    ICLTensor       *output_to_use  = output;

    const bool is_quantized_per_channel = is_data_type_quantized_per_channel(weights->info()->data_type());
    const bool is_stride_1              = ((conv_info.stride().first == conv_info.stride().second) && (conv_info.stride().first == 1));
    const bool is_dot8_supported        = dot8_supported(CLKernelLibrary::get().get_device()) && !is_quantized_per_channel;
    const bool is_stride_1_dilation_1   = (is_stride_1 && dilation.x() == 1 && dilation.y() == 1);

    DepthwiseConvolutionReshapeInfo info;
    info.c0        = 4;
    info.transpose = is_stride_1_dilation_1 && is_dot8_supported;

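    // Layout handling: NHWC input with depth_multiplier > 1 is permuted to NCHW and run on
    // the NCHW 3x3 kernel; quantized NHWC with depth_multiplier == 1 instead reshapes the
    // weights so the NHWC kernel (and, where supported, the dot8 path) can consume them.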
    if(_needs_permute)
    {
        _memory_group.manage(&_permuted_input);
        _memory_group.manage(&_permuted_output);

        // Configure the function to transform the input tensor from NHWC -> NCHW
        _permute_input_to_nchw.configure(compile_context, input, &_permuted_input, PermutationVector(1U, 2U, 0U));
        _permuted_input.info()->set_data_layout(DataLayout::NCHW);

        // Configure the function to transform the weights tensor from HWI -> IHW
        _permute_weights_to_nchw.configure(compile_context, weights, &_permuted_weights, PermutationVector(1U, 2U, 0U));
        _permuted_weights.info()->set_data_layout(DataLayout::NCHW);
        _permuted_output.info()->set_quantization_info(output->info()->quantization_info());

        input_to_use   = &_permuted_input;
        weights_to_use = &_permuted_weights;
        output_to_use  = &_permuted_output;

        _kernel = std::make_unique<CLDepthwiseConvolutionLayer3x3NCHWKernel>();
    }
    else if(is_nhwc)
    {
        if(_needs_weights_reshape)
        {
            _reshape_weights->configure(compile_context, weights, &_permuted_weights, info);
            weights_to_use = &_permuted_weights;
        }
        _kernel = std::make_unique<CLDepthwiseConvolutionLayer3x3NHWCKernel>();
    }
    else
    {
        _kernel = std::make_unique<CLDepthwiseConvolutionLayer3x3NCHWKernel>();
    }

    CLTensor *output_multipliers_to_use = nullptr;
    CLTensor *output_shifts_to_use      = nullptr;
    if(_is_quantized)
    {
        const size_t idx_c       = get_data_layout_dimension_index(weights->info()->data_layout(), DataLayoutDimension::CHANNEL);
        const size_t num_filters = (is_quantized_per_channel) ? weights->info()->dimension(idx_c) : 1;

        _output_multipliers.allocator()->init(TensorInfo(TensorShape(num_filters), 1, DataType::S32));
        _output_shifts.allocator()->init(TensorInfo(TensorShape(num_filters), 1, DataType::S32));

        output_multipliers_to_use = &_output_multipliers;
        output_shifts_to_use      = &_output_shifts;
    }

    // Configure kernel
    _kernel->set_target(gpu_target);
    _kernel->configure(compile_context, input_to_use, weights_to_use, biases, output_to_use, conv_info, depth_multiplier,
                       act_info, dilation, output_multipliers_to_use, output_shifts_to_use);

    if(_is_quantized)
    {
        _output_multipliers.allocator()->allocate();
        _output_shifts.allocator()->allocate();
    }

    // Permute output if needed
    if(_needs_permute)
    {
        // Configure the function to transform the convolved output back to the original NHWC layout
        _permuted_output.info()->set_data_layout(DataLayout::NCHW);
        _permute_output_to_nhwc.configure(compile_context, &_permuted_output, output, PermutationVector(2U, 0U, 1U));

        // Allocate tensors
        _permuted_input.allocator()->allocate();
        _permuted_output.allocator()->allocate();
    }
    // Configure border handler
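    // For asymmetric quantized inputs the constant border value is the zero-point offset,
    // which dequantizes to a real 0.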
    PixelValue &&zero_value(0.f);
    if(is_data_type_quantized_asymmetric(input->info()->data_type()))
    {
        zero_value = PixelValue(static_cast<uint8_t>(input->info()->quantization_info().uniform().offset));
    }
    _border_handler->configure(compile_context, input_to_use, _kernel->border_size(), BorderMode::CONSTANT, zero_value);
}

Status CLDepthwiseConvolutionLayer::CLDepthwiseConvolutionLayerInternal3x3::validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output,
                                                                                     const PadStrideInfo &conv_info, unsigned int depth_multiplier, ActivationLayerInfo act_info, GPUTarget gpu_target, const Size2D &dilation)
{
    return validate_arguments_3x3(input, weights, biases, output, conv_info, depth_multiplier, act_info, gpu_target, dilation);
}

void CLDepthwiseConvolutionLayer::CLDepthwiseConvolutionLayerInternal3x3::run()
{
    prepare();

    MemoryGroupResourceScope scope_mg(_memory_group);

    if(_needs_permute)
    {
        _permute_input_to_nchw.run();
    }
    CLScheduler::get().enqueue(*_border_handler);
    CLScheduler::get().enqueue(*_kernel);

    if(_needs_permute)
    {
        _permute_output_to_nhwc.run();
    }
}

void CLDepthwiseConvolutionLayer::CLDepthwiseConvolutionLayerInternal3x3::prepare()
{
    if(!_is_prepared)
    {
        if(_is_quantized)
        {
            _output_multipliers.map();
            _output_shifts.map();
            const unsigned int idx_ofms = get_data_layout_dimension_index(_output->info()->data_layout(), DataLayoutDimension::CHANNEL);
            quantization::compute_quantized_multipliers_and_shifts(_input->info(),
                                                                   _original_weights->info(),
                                                                   _output->info(),
                                                                   idx_ofms,
                                                                   reinterpret_cast<int32_t *>(_output_multipliers.ptr_to_element(Coordinates(0))),
                                                                   reinterpret_cast<int32_t *>(_output_shifts.ptr_to_element(Coordinates(0))));
            _output_multipliers.unmap();
            _output_shifts.unmap();
        }

        if(_needs_permute)
        {
            ARM_COMPUTE_ERROR_ON(!_original_weights->is_used());

            _permuted_weights.allocator()->allocate();
            _permute_weights_to_nchw.run();
            _original_weights->mark_as_unused();
        }

        if(_needs_weights_reshape)
        {
            ARM_COMPUTE_ERROR_ON(_needs_permute);
            ARM_COMPUTE_ERROR_ON(!_original_weights->is_used());
            _permuted_weights.allocator()->allocate();
            CLScheduler::get().enqueue(*_reshape_weights);
            _original_weights->mark_as_unused();
        }
        _is_prepared = true;
    }
}

CLDepthwiseConvolutionLayer::CLDepthwiseConvolutionLayer(std::shared_ptr<IMemoryManager> memory_manager)
    : _memory_manager(std::move(memory_manager)), _depth_conv_func(DepthwiseConvolutionFunction::GENERIC), _func_3x3(), _func_generic()
{
}

void CLDepthwiseConvolutionLayer::configure(ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info, unsigned int depth_multiplier,
                                            ActivationLayerInfo act_info, const Size2D &dilation)
{
    configure(CLKernelLibrary::get().get_compile_context(), input, weights, biases, output, conv_info, depth_multiplier, act_info, dilation);
}

void CLDepthwiseConvolutionLayer::configure(const CLCompileContext &compile_context, ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output,
                                            const PadStrideInfo &conv_info,
                                            unsigned int depth_multiplier,
                                            ActivationLayerInfo act_info, const Size2D &dilation)
{
    const GPUTarget gpu_target = CLScheduler::get().target();
    _depth_conv_func           = get_depthwiseconvolution_function(input->info(), weights->info(), (biases != nullptr) ? biases->info() : nullptr, output->info(), conv_info, depth_multiplier, act_info,
                                                                   dilation, gpu_target);
    switch(_depth_conv_func)
    {
        case DepthwiseConvolutionFunction::OPTIMIZED_3x3:
            _func_3x3.set_memory_group(_memory_manager);
            _func_3x3.configure(compile_context, input, weights, biases, output, conv_info, depth_multiplier, act_info, dilation);
            break;
        case DepthwiseConvolutionFunction::GENERIC:
        {
            _func_generic.set_memory_group(_memory_manager);
            _func_generic.configure(compile_context, input, weights, biases, output, conv_info, depth_multiplier, act_info, dilation);
        }
        break;
        default:
            ARM_COMPUTE_ERROR("Unsupported DepthwiseConvolutionFunction");
    }
}

Status CLDepthwiseConvolutionLayer::validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info,
                                             unsigned int depth_multiplier, ActivationLayerInfo act_info, const Size2D &dilation)
{
    const GPUTarget              gpu_target      = CLScheduler::get().target();
    DepthwiseConvolutionFunction depth_conv_func = get_depthwiseconvolution_function(input, weights, biases, output, conv_info, depth_multiplier, act_info, dilation, gpu_target);
    switch(depth_conv_func)
    {
        case DepthwiseConvolutionFunction::OPTIMIZED_3x3:
            return CLDepthwiseConvolutionLayerInternal3x3::validate(input, weights, biases, output, conv_info, depth_multiplier, act_info, gpu_target, dilation);
        case DepthwiseConvolutionFunction::GENERIC:
            return CLDepthwiseConvolutionLayerGeneric::validate(input, weights, biases, output, conv_info, depth_multiplier, act_info, dilation);
        default:
            ARM_COMPUTE_ERROR("Unsupported DepthwiseConvolutionFunction");
    }
}

DepthwiseConvolutionFunction CLDepthwiseConvolutionLayer::get_depthwiseconvolution_function(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output,
                                                                                            const PadStrideInfo &conv_info,
                                                                                            unsigned int depth_multiplier, ActivationLayerInfo act_info, const Size2D &dilation, GPUTarget gpu_target)
{
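    // Prefer the internal 3x3 path when it validates and the data type is floating point,
    // or when targeting Midgard; otherwise fall back to the generic native kernel.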
    if(bool(CLDepthwiseConvolutionLayerInternal3x3::validate(input, weights, biases, output, conv_info, depth_multiplier, act_info, gpu_target, dilation)) && (is_data_type_float(input->data_type())
                                                                                                                                                               || get_arch_from_target(gpu_target) == GPUTarget::MIDGARD))
    {
        return DepthwiseConvolutionFunction::OPTIMIZED_3x3;
    }
    else
    {
        return DepthwiseConvolutionFunction::GENERIC;
    }
}

void CLDepthwiseConvolutionLayer::run()
{
    switch(_depth_conv_func)
    {
        case DepthwiseConvolutionFunction::OPTIMIZED_3x3:
            _func_3x3.run();
            break;
        case DepthwiseConvolutionFunction::GENERIC:
            _func_generic.run();
            break;
        default:
            ARM_COMPUTE_ERROR("DepthwiseConvolutionFunction not properly configured");
    }
}

void CLDepthwiseConvolutionLayer::prepare()
{
    switch(_depth_conv_func)
    {
        case DepthwiseConvolutionFunction::OPTIMIZED_3x3:
            _func_3x3.prepare();
            break;
        case DepthwiseConvolutionFunction::GENERIC:
            _func_generic.prepare();
            break;
        default:
            ARM_COMPUTE_ERROR("DepthwiseConvolutionFunction not properly configured");
    }
}
} // namespace arm_compute