Compute Library 20.08
CLDepthwiseConvolutionLayer.cpp
/*
 * Copyright (c) 2017-2020 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/runtime/CL/functions/CLDepthwiseConvolutionLayer.h"

#include "arm_compute/core/CL/ICLTensor.h"
#include "arm_compute/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NCHWKernel.h"
#include "arm_compute/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NHWCKernel.h"
#include "arm_compute/core/CL/kernels/CLDepthwiseConvolutionLayerNativeKernel.h"
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/PixelValue.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "arm_compute/core/utils/quantization/AsymmHelpers.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "support/MemorySupport.h"

namespace arm_compute
{
using namespace arm_compute::misc;
using namespace arm_compute::misc::shape_calculator;

namespace
{
Status validate_arguments_3x3(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info,
                              unsigned int depth_multiplier, ActivationLayerInfo act_info, GPUTarget gpu_target, const Size2D &dilation)
{
    // This function should be removed and incorporated inside CLDepthwiseConvolutionLayerInternal3x3 once CLDepthwiseConvolutionLayer3x3 is properly removed
    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, weights, output);
    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_LAYOUT(input, weights);
    ARM_COMPUTE_RETURN_ERROR_ON(dilation.x() < 1 || dilation.y() < 1);

    const bool is_quantized           = is_data_type_quantized_asymmetric(input->data_type());
    const bool is_nhwc                = input->data_layout() == DataLayout::NHWC;
    const bool needs_permute          = is_nhwc && (depth_multiplier > 1);
    const bool needs_weights_reshape  = is_nhwc && (depth_multiplier == 1) && is_quantized;
    const bool is_stride_1            = ((conv_info.stride().first == conv_info.stride().second) && (conv_info.stride().first == 1));
    const bool is_stride_1_dilation_1 = (is_stride_1 && dilation.x() == 1 && dilation.y() == 1);
    const bool is_dot8_supported      = dot8_supported(CLKernelLibrary::get().get_device());
    DepthwiseConvolutionReshapeInfo info;
    info.c0        = 4;
    info.transpose = is_stride_1_dilation_1 && is_dot8_supported;

    TensorInfo output_multipliers_shifts_info(TensorInfo(TensorShape(1U), 1, DataType::S32));
    if(is_quantized)
    {
        if(is_data_type_quantized_per_channel(weights->data_type()))
        {
            ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(weights, 1, DataType::QSYMM8_PER_CHANNEL);

            const size_t idx_c = get_data_layout_dimension_index(weights->data_layout(), DataLayoutDimension::CHANNEL);
            output_multipliers_shifts_info.set_tensor_shape(TensorShape(weights->dimension(idx_c)));
        }
        else
        {
            ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, weights);
        }
    }

    if(needs_permute)
    {
        TensorShape permuted_input_shape   = input->tensor_shape();
        TensorShape permuted_weights_shape = weights->tensor_shape();
        TensorShape permuted_output_shape  = shape_calculator::compute_depthwise_convolution_shape(*input, *weights, conv_info, depth_multiplier, dilation);

        permute(permuted_input_shape, PermutationVector(1U, 2U, 0U));
        permute(permuted_weights_shape, PermutationVector(1U, 2U, 0U));
        permute(permuted_output_shape, PermutationVector(1U, 2U, 0U));

        const TensorInfo permuted_input   = input->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(permuted_input_shape).set_data_layout(DataLayout::NCHW);
        const TensorInfo permuted_weights = weights->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(permuted_weights_shape).set_data_layout(DataLayout::NCHW);
        const TensorInfo permuted_output  = output->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(permuted_output_shape).set_data_layout(DataLayout::NCHW);

        ARM_COMPUTE_RETURN_ON_ERROR(CLDepthwiseConvolutionLayer3x3NCHWKernel::validate(&permuted_input, &permuted_weights, biases, &permuted_output,
                                                                                       conv_info, depth_multiplier, act_info, gpu_target,
                                                                                       dilation, &output_multipliers_shifts_info, &output_multipliers_shifts_info));
    }
    else if(is_nhwc)
    {
        if(needs_weights_reshape)
        {
            const TensorShape reshaped_weights_shape = shape_calculator::compute_reshaped_depthwise_weights_shape(*weights, info);
            ARM_COMPUTE_RETURN_ON_ERROR(CLDepthwiseConvolutionLayer3x3NHWCKernel::validate(input, &weights->clone()->set_tensor_shape(reshaped_weights_shape), biases,
                                                                                           output, conv_info, depth_multiplier, act_info,
                                                                                           dilation, &output_multipliers_shifts_info, &output_multipliers_shifts_info));
        }
        else
        {
            ARM_COMPUTE_RETURN_ON_ERROR(CLDepthwiseConvolutionLayer3x3NHWCKernel::validate(input, weights, biases, output, conv_info, depth_multiplier, act_info,
                                                                                           dilation, &output_multipliers_shifts_info, &output_multipliers_shifts_info));
        }
    }
    else
    {
        ARM_COMPUTE_RETURN_ON_ERROR(CLDepthwiseConvolutionLayer3x3NCHWKernel::validate(input, weights, biases, output, conv_info, depth_multiplier, act_info, gpu_target,
                                                                                       dilation, &output_multipliers_shifts_info, &output_multipliers_shifts_info));
    }
    return Status{};
}
} // namespace

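// Editorial aside (not part of the upstream file): ACL stores tensor shapes
// lowest dimension first, so an NCHW shape reads [W, H, C, N] and an NHWC
// shape reads [C, W, H, N]. permute() above is defined as new_shape[i] =
// old_shape[perm[i]], so PermutationVector(1U, 2U, 0U) maps NHWC -> NCHW and
// PermutationVector(2U, 0U, 1U) is its inverse, NCHW -> NHWC. For example:
//   [C=32, W=56, H=56]  --(1,2,0)-->  [W=56, H=56, C=32]
//   [W=56, H=56, C=32]  --(2,0,1)-->  [C=32, W=56, H=56]
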
CLDepthwiseConvolutionLayer::CLDepthwiseConvolutionLayerGeneric::CLDepthwiseConvolutionLayerGeneric(std::shared_ptr<IMemoryManager> memory_manager)
    : _memory_group(std::move(memory_manager)),
      _dwc_native_kernel(),
      _permute_input_to_nhwc(),
      _permute_weights_to_nhwc(),
      _permute_output_to_nchw(),
      _permuted_input(),
      _permuted_weights(),
      _permuted_output(),
      _output_multipliers(),
      _output_shifts(),
      _original_weights(),
      _input(),
      _output(),
      _needs_permute(false),
      _is_prepared(false),
      _is_quantized(false)
{
}

void CLDepthwiseConvolutionLayer::CLDepthwiseConvolutionLayerGeneric::configure(ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info,
                                                                                unsigned int depth_multiplier, const ActivationLayerInfo &act_info, const Size2D &dilation)
{
    configure(CLKernelLibrary::get().get_compile_context(), input, weights, biases, output, conv_info, depth_multiplier, act_info, dilation);
}

void CLDepthwiseConvolutionLayer::CLDepthwiseConvolutionLayerGeneric::configure(const CLCompileContext &compile_context, ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases,
                                                                                ICLTensor *output, const PadStrideInfo &conv_info,
                                                                                unsigned int depth_multiplier, const ActivationLayerInfo &act_info, const Size2D &dilation)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(input, weights, output);
    ARM_COMPUTE_ERROR_THROW_ON(CLDepthwiseConvolutionLayerGeneric::validate(input->info(),
                                                                            weights->info(),
                                                                            biases != nullptr ? biases->info() : nullptr,
                                                                            output->info(),
                                                                            conv_info,
                                                                            depth_multiplier,
                                                                            act_info,
                                                                            dilation));

    _is_quantized     = is_data_type_quantized(input->info()->data_type());
    _is_prepared      = false;
    _original_weights = weights;
    _input            = input;
    _output           = output;
    _needs_permute    = input->info()->data_layout() == DataLayout::NCHW;

    ICLTensor       *input_to_use   = input;
    const ICLTensor *weights_to_use = weights;
    ICLTensor       *output_to_use  = output;
    if(_needs_permute)
    {
        _memory_group.manage(&_permuted_input);
        _memory_group.manage(&_permuted_output);

        // Configure the function to transform the input tensor from NCHW -> NHWC
        _permute_input_to_nhwc.configure(compile_context, input, &_permuted_input, PermutationVector(2U, 0U, 1U));
        _permuted_input.info()->set_data_layout(DataLayout::NHWC);

        // Configure the function to transform the weights tensor from IHW -> HWI
        _permute_weights_to_nhwc.configure(compile_context, weights, &_permuted_weights, PermutationVector(2U, 0U, 1U));
        _permuted_weights.info()->set_data_layout(DataLayout::NHWC);

        // Set output quantization info before dwc kernel configure
        _permuted_output.info()->set_quantization_info(output->info()->quantization_info());

        input_to_use   = &_permuted_input;
        weights_to_use = &_permuted_weights;
        output_to_use  = &_permuted_output;
    }

    CLTensor *output_multipliers_to_use = nullptr;
    CLTensor *output_shifts_to_use      = nullptr;
    if(_is_quantized)
    {
        const size_t idx_c       = get_data_layout_dimension_index(weights->info()->data_layout(), DataLayoutDimension::CHANNEL);
        const size_t num_filters = (is_data_type_quantized_per_channel(weights->info()->data_type())) ? weights->info()->dimension(idx_c) : 1;

        _output_multipliers.allocator()->init(TensorInfo(TensorShape(num_filters), 1, DataType::S32));
        _output_shifts.allocator()->init(TensorInfo(TensorShape(num_filters), 1, DataType::S32));

        output_multipliers_to_use = &_output_multipliers;
        output_shifts_to_use      = &_output_shifts;
    }

    DWCWeightsKernelInfo dwc_weights_info;
    dwc_weights_info.n0 = (depth_multiplier == 1) ? 8 : 1;
    DWCKernelInfo dwc_info;
    dwc_info.activation_info = act_info;
    _dwc_native_kernel.configure(compile_context, input_to_use, weights_to_use, biases, output_to_use,
                                 dwc_weights_info, dwc_info, conv_info, depth_multiplier, dilation,
                                 output_multipliers_to_use, output_shifts_to_use);

    if(_needs_permute)
    {
        _permuted_input.allocator()->allocate();

        // Configure the function to transform the convoluted output to NCHW format
        _permuted_output.info()->set_data_layout(DataLayout::NCHW);
        _permute_output_to_nchw.configure(compile_context, &_permuted_output, output, PermutationVector(1U, 2U, 0U));
        _permuted_output.allocator()->allocate();
    }

    if(_is_quantized)
    {
        _output_multipliers.allocator()->allocate();
        _output_shifts.allocator()->allocate();
    }
}
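// Editorial aside (not part of the upstream file): for an NCHW input the
// generic path configured above runs in three stages: permute the input
// NCHW -> NHWC, execute the native NHWC depthwise kernel, then permute the
// result back NHWC -> NCHW. The intermediate _permuted_input and
// _permuted_output tensors are registered with _memory_group, so their
// backing memory can be shared with other functions between run() calls.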

Status CLDepthwiseConvolutionLayer::CLDepthwiseConvolutionLayerGeneric::validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output,
                                                                                 const PadStrideInfo &conv_info,
                                                                                 unsigned int depth_multiplier, const ActivationLayerInfo &act_info, const Size2D &dilation)
{
    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, weights, output);
    const size_t idx_w = get_data_layout_dimension_index(input->data_layout(), DataLayoutDimension::WIDTH);
    const size_t idx_h = get_data_layout_dimension_index(input->data_layout(), DataLayoutDimension::HEIGHT);

    ARM_COMPUTE_RETURN_ERROR_ON(weights->dimension(idx_w) + (weights->dimension(idx_w) - 1) * (dilation.x() - 1) > input->dimension(idx_w) + conv_info.pad_left() + conv_info.pad_right());
    ARM_COMPUTE_RETURN_ERROR_ON(weights->dimension(idx_h) + (weights->dimension(idx_h) - 1) * (dilation.y() - 1) > input->dimension(idx_h) + conv_info.pad_top() + conv_info.pad_bottom());

    DWCWeightsKernelInfo dwc_weights_info;
    dwc_weights_info.n0 = (depth_multiplier == 1) ? 8 : 1;
    DWCKernelInfo dwc_info;
    dwc_info.activation_info = act_info;

    const bool needs_permute = input->data_layout() == DataLayout::NCHW;

    const bool is_quantized = is_data_type_quantized(input->data_type());

    TensorInfo output_multipliers_shifts_info(TensorInfo(TensorShape(1U), 1, DataType::S32));
    if(is_quantized)
    {
        if(is_data_type_quantized_per_channel(weights->data_type()))
        {
            ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(weights, 1, DataType::QSYMM8_PER_CHANNEL);

            const size_t idx_c = get_data_layout_dimension_index(weights->data_layout(), DataLayoutDimension::CHANNEL);
            output_multipliers_shifts_info.set_tensor_shape(TensorShape(weights->dimension(idx_c)));
        }
        else
        {
            ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, weights);
        }
    }

    if(needs_permute)
    {
        TensorShape permuted_input_shape   = input->tensor_shape();
        TensorShape permuted_weights_shape = weights->tensor_shape();
        TensorShape permuted_output_shape  = shape_calculator::compute_depthwise_convolution_shape(*input, *weights, conv_info, depth_multiplier, dilation);

        permute(permuted_input_shape, PermutationVector(2U, 0U, 1U));
        permute(permuted_weights_shape, PermutationVector(2U, 0U, 1U));
        permute(permuted_output_shape, PermutationVector(2U, 0U, 1U));

        const TensorInfo permuted_input   = input->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(permuted_input_shape).set_data_layout(DataLayout::NHWC);
        const TensorInfo permuted_weights = weights->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(permuted_weights_shape).set_data_layout(DataLayout::NHWC);
        const TensorInfo permuted_output  = output->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(permuted_output_shape).set_data_layout(DataLayout::NHWC);

        ARM_COMPUTE_RETURN_ON_ERROR(CLPermute::validate(input, &permuted_input, PermutationVector(2U, 0U, 1U)));
        ARM_COMPUTE_RETURN_ON_ERROR(CLPermute::validate(weights, &permuted_weights, PermutationVector(2U, 0U, 1U)));
        ARM_COMPUTE_RETURN_ON_ERROR(CLDepthwiseConvolutionLayerNativeKernel::validate(&permuted_input, &permuted_weights, biases, &permuted_output, dwc_weights_info,
                                                                                      dwc_info, conv_info, depth_multiplier, dilation,
                                                                                      &output_multipliers_shifts_info, &output_multipliers_shifts_info));
        ARM_COMPUTE_RETURN_ON_ERROR(CLPermute::validate(&permuted_output, output, PermutationVector(1U, 2U, 0U)));
    }
    else
    {
        ARM_COMPUTE_RETURN_ON_ERROR(CLDepthwiseConvolutionLayerNativeKernel::validate(input, weights, biases, output, dwc_weights_info, dwc_info, conv_info, depth_multiplier,
                                                                                      dilation, &output_multipliers_shifts_info, &output_multipliers_shifts_info));
    }
    return Status{};
}
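// Editorial aside (not part of the upstream file): validate() mirrors
// configure() without touching the device, so callers can probe support
// cheaply before allocating anything. A minimal sketch, assuming in, w and
// out are already-initialised tensors:
//
//   const Status s = CLDepthwiseConvolutionLayer::validate(in.info(), w.info(),
//                                                          nullptr, out.info(), conv_info);
//   if(bool(s)) { /* configuration is supported; safe to call configure() */ }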

void CLDepthwiseConvolutionLayer::CLDepthwiseConvolutionLayerGeneric::run()
{
    prepare();

    MemoryGroupResourceScope scope_mg(_memory_group);

    if(_needs_permute)
    {
        _permute_input_to_nhwc.run();
    }
    CLScheduler::get().enqueue(_dwc_native_kernel);
    if(_needs_permute)
    {
        _permute_output_to_nchw.run();
    }
}

void CLDepthwiseConvolutionLayer::CLDepthwiseConvolutionLayerGeneric::prepare()
{
    if(!_is_prepared)
    {
        if(_is_quantized)
        {
            _output_multipliers.map();
            _output_shifts.map();
            const unsigned int idx_ofms = get_data_layout_dimension_index(_output->info()->data_layout(), DataLayoutDimension::CHANNEL);
            quantization::compute_quantized_multipliers_and_shifts(_input->info(),
                                                                   _original_weights->info(),
                                                                   _output->info(),
                                                                   idx_ofms,
                                                                   reinterpret_cast<int32_t *>(_output_multipliers.ptr_to_element(Coordinates(0))),
                                                                   reinterpret_cast<int32_t *>(_output_shifts.ptr_to_element(Coordinates(0))));
            _output_multipliers.unmap();
            _output_shifts.unmap();
        }

        if(_needs_permute)
        {
            ARM_COMPUTE_ERROR_ON(!_original_weights->is_used());

            _permuted_weights.allocator()->allocate();
            _permute_weights_to_nhwc.run();
            _original_weights->mark_as_unused();
        }
        _is_prepared = true;
    }
}
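// Editorial aside (not part of the upstream file): prepare() is one-time
// setup and is invoked by run() on first execution. For quantized tensors it
// computes the per-output-channel requantisation multipliers and shifts on
// the host, hence the map()/unmap() pair around the writes. For NCHW inputs
// it permutes the weights once and then releases the original weight tensor
// via mark_as_unused().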

CLDepthwiseConvolutionLayer::CLDepthwiseConvolutionLayerInternal3x3::CLDepthwiseConvolutionLayerInternal3x3(std::shared_ptr<IMemoryManager> memory_manager)
    : _memory_group(std::move(memory_manager)),
      _kernel(nullptr),
      _border_handler(),
      _permute_input_to_nchw(),
      _permute_weights_to_nchw(),
      _permute_output_to_nhwc(),
      _reshape_weights(),
      _permuted_input(),
      _permuted_weights(),
      _permuted_output(),
      _output_multipliers(),
      _output_shifts(),
      _original_weights(nullptr),
      _input(nullptr),
      _output(nullptr),
      _needs_permute(false),
      _needs_weights_reshape(false),
      _is_prepared(false),
      _is_quantized(false)
{
}

void CLDepthwiseConvolutionLayer::CLDepthwiseConvolutionLayerInternal3x3::configure(ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output,
                                                                                    const PadStrideInfo &conv_info, unsigned int depth_multiplier, ActivationLayerInfo act_info, const Size2D &dilation)
{
    configure(CLKernelLibrary::get().get_compile_context(), input, weights, biases, output, conv_info, depth_multiplier, act_info, dilation);
}

void CLDepthwiseConvolutionLayer::CLDepthwiseConvolutionLayerInternal3x3::configure(const CLCompileContext &compile_context, ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases,
                                                                                    ICLTensor *output,
                                                                                    const PadStrideInfo &conv_info, unsigned int depth_multiplier, ActivationLayerInfo act_info, const Size2D &dilation)
{
    const GPUTarget gpu_target = CLScheduler::get().target();

    // Perform validation step
    ARM_COMPUTE_ERROR_ON_NULLPTR(input, weights, output);
    ARM_COMPUTE_ERROR_THROW_ON(CLDepthwiseConvolutionLayerInternal3x3::validate(input->info(),
                                                                                weights->info(),
                                                                                biases != nullptr ? biases->info() : nullptr,
                                                                                output->info(),
                                                                                conv_info,
                                                                                depth_multiplier,
                                                                                act_info,
                                                                                gpu_target,
                                                                                dilation));

    const bool is_nhwc     = input->info()->data_layout() == DataLayout::NHWC;
    _is_quantized          = is_data_type_quantized_asymmetric(input->info()->data_type());
    _needs_permute         = is_nhwc && (depth_multiplier > 1);
    _needs_weights_reshape = is_nhwc && (depth_multiplier == 1) && _is_quantized;

    _is_prepared      = false;
    _original_weights = weights;
    _input            = input;
    _output           = output;

    ICLTensor       *input_to_use   = input;
    const ICLTensor *weights_to_use = weights;
    ICLTensor       *output_to_use  = output;

    const bool is_quantized_per_channel = is_data_type_quantized_per_channel(weights->info()->data_type());
    const bool is_stride_1              = ((conv_info.stride().first == conv_info.stride().second) && (conv_info.stride().first == 1));
    const bool is_dot8_supported        = dot8_supported(CLKernelLibrary::get().get_device()) && !is_quantized_per_channel;
    const bool is_stride_1_dilation_1   = (is_stride_1 && dilation.x() == 1 && dilation.y() == 1);

    DepthwiseConvolutionReshapeInfo info;
    info.c0        = 4;
    info.transpose = is_stride_1_dilation_1 && is_dot8_supported;

    if(_needs_permute)
    {
        _memory_group.manage(&_permuted_input);
        _memory_group.manage(&_permuted_output);

        // Configure the function to transform the input tensor from NHWC -> NCHW
        _permute_input_to_nchw.configure(compile_context, input, &_permuted_input, PermutationVector(1U, 2U, 0U));
        _permuted_input.info()->set_data_layout(DataLayout::NCHW);

        // Configure the function to transform the weights tensor from HWI -> IHW
        _permute_weights_to_nchw.configure(compile_context, weights, &_permuted_weights, PermutationVector(1U, 2U, 0U));
        _permuted_weights.info()->set_data_layout(DataLayout::NCHW);
        _permuted_output.info()->set_quantization_info(output->info()->quantization_info());

        input_to_use   = &_permuted_input;
        weights_to_use = &_permuted_weights;
        output_to_use  = &_permuted_output;

        _kernel = arm_compute::support::cpp14::make_unique<CLDepthwiseConvolutionLayer3x3NCHWKernel>();
    }
    else if(is_nhwc)
    {
        if(_needs_weights_reshape)
        {
            _reshape_weights.configure(compile_context, weights, &_permuted_weights, info);
            weights_to_use = &_permuted_weights;
        }
        _kernel = arm_compute::support::cpp14::make_unique<CLDepthwiseConvolutionLayer3x3NHWCKernel>();
    }
    else
    {
        _kernel = arm_compute::support::cpp14::make_unique<CLDepthwiseConvolutionLayer3x3NCHWKernel>();
    }

    CLTensor *output_multipliers_to_use = nullptr;
    CLTensor *output_shifts_to_use      = nullptr;
    if(_is_quantized)
    {
        const size_t idx_c       = get_data_layout_dimension_index(weights->info()->data_layout(), DataLayoutDimension::CHANNEL);
        const size_t num_filters = (is_quantized_per_channel) ? weights->info()->dimension(idx_c) : 1;

        _output_multipliers.allocator()->init(TensorInfo(TensorShape(num_filters), 1, DataType::S32));
        _output_shifts.allocator()->init(TensorInfo(TensorShape(num_filters), 1, DataType::S32));

        output_multipliers_to_use = &_output_multipliers;
        output_shifts_to_use      = &_output_shifts;
    }

    // Configure kernel
    _kernel->set_target(gpu_target);
    _kernel->configure(compile_context, input_to_use, weights_to_use, biases, output_to_use, conv_info, depth_multiplier,
                       act_info, dilation, output_multipliers_to_use, output_shifts_to_use);

    if(_is_quantized)
    {
        _output_multipliers.allocator()->allocate();
        _output_shifts.allocator()->allocate();
    }

    // Permute output if needed
    if(_needs_permute)
    {
        // Configure the function to transform the convoluted output back to NHWC
        _permuted_output.info()->set_data_layout(DataLayout::NCHW);
        _permute_output_to_nhwc.configure(compile_context, &_permuted_output, output, PermutationVector(2U, 0U, 1U));

        // Allocate tensors
        _permuted_input.allocator()->allocate();
        _permuted_output.allocator()->allocate();
    }
    // Configure border handler
    PixelValue &&zero_value(0.f);
    if(is_data_type_quantized_asymmetric(input->info()->data_type()))
    {
        zero_value = PixelValue(static_cast<uint8_t>(input->info()->quantization_info().uniform().offset));
    }
    _border_handler.configure(compile_context, input_to_use, _kernel->border_size(), BorderMode::CONSTANT, zero_value);
}
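// Editorial aside (not part of the upstream file): unlike the generic path,
// the 3x3 kernels read a border of neighbouring pixels around each output
// element, so the _border_handler kernel is configured with
// BorderMode::CONSTANT. The fill value is 0 for float tensors and the
// quantization offset for asymmetric quantized tensors, so that the padded
// values dequantize to 0.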

Status CLDepthwiseConvolutionLayer::CLDepthwiseConvolutionLayerInternal3x3::validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output,
                                                                                     const PadStrideInfo &conv_info, unsigned int depth_multiplier, ActivationLayerInfo act_info, GPUTarget gpu_target, const Size2D &dilation)
{
    return validate_arguments_3x3(input, weights, biases, output, conv_info, depth_multiplier, act_info, gpu_target, dilation);
}

void CLDepthwiseConvolutionLayer::CLDepthwiseConvolutionLayerInternal3x3::run()
{
    prepare();

    MemoryGroupResourceScope scope_mg(_memory_group);

    if(_needs_permute)
    {
        _permute_input_to_nchw.run();
    }
    CLScheduler::get().enqueue(_border_handler);
    CLScheduler::get().enqueue(*_kernel);

    if(_needs_permute)
    {
        _permute_output_to_nhwc.run();
    }
}

void CLDepthwiseConvolutionLayer::CLDepthwiseConvolutionLayerInternal3x3::prepare()
{
    if(!_is_prepared)
    {
        if(_is_quantized)
        {
            _output_multipliers.map();
            _output_shifts.map();
            const unsigned int idx_ofms = get_data_layout_dimension_index(_output->info()->data_layout(), DataLayoutDimension::CHANNEL);
            quantization::compute_quantized_multipliers_and_shifts(_input->info(),
                                                                   _original_weights->info(),
                                                                   _output->info(),
                                                                   idx_ofms,
                                                                   reinterpret_cast<int32_t *>(_output_multipliers.ptr_to_element(Coordinates(0))),
                                                                   reinterpret_cast<int32_t *>(_output_shifts.ptr_to_element(Coordinates(0))));
            _output_multipliers.unmap();
            _output_shifts.unmap();
        }

        if(_needs_permute)
        {
            ARM_COMPUTE_ERROR_ON(!_original_weights->is_used());

            _permuted_weights.allocator()->allocate();
            _permute_weights_to_nchw.run();
            _original_weights->mark_as_unused();
        }

        if(_needs_weights_reshape)
        {
            ARM_COMPUTE_ERROR_ON(_needs_permute);
            ARM_COMPUTE_ERROR_ON(!_original_weights->is_used());
            _permuted_weights.allocator()->allocate();
            CLScheduler::get().enqueue(_reshape_weights);
            _original_weights->mark_as_unused();
        }
        _is_prepared = true;
    }
}

CLDepthwiseConvolutionLayer::CLDepthwiseConvolutionLayer(std::shared_ptr<IMemoryManager> memory_manager)
    : _memory_manager(std::move(memory_manager)), _depth_conv_func(DepthwiseConvolutionFunction::GENERIC), _func_3x3(), _func_generic()
{
}

void CLDepthwiseConvolutionLayer::configure(ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info, unsigned int depth_multiplier,
                                            ActivationLayerInfo act_info, const Size2D &dilation)
{
    configure(CLKernelLibrary::get().get_compile_context(), input, weights, biases, output, conv_info, depth_multiplier, act_info, dilation);
}

void CLDepthwiseConvolutionLayer::configure(const CLCompileContext &compile_context, ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output,
                                            const PadStrideInfo &conv_info,
                                            unsigned int depth_multiplier,
                                            ActivationLayerInfo act_info, const Size2D &dilation)
{
    const GPUTarget gpu_target = CLScheduler::get().target();
    _depth_conv_func           = get_depthwiseconvolution_function(input->info(), weights->info(), (biases != nullptr) ? biases->info() : nullptr, output->info(), conv_info, depth_multiplier, act_info,
                                                                   dilation, gpu_target);
    switch(_depth_conv_func)
    {
        case DepthwiseConvolutionFunction::OPTIMIZED:
            _func_3x3.set_memory_group(_memory_manager);
            _func_3x3.configure(compile_context, input, weights, biases, output, conv_info, depth_multiplier, act_info, dilation);
            break;
        case DepthwiseConvolutionFunction::GENERIC:
        {
            _func_generic.set_memory_group(_memory_manager);
            _func_generic.configure(compile_context, input, weights, biases, output, conv_info, depth_multiplier, act_info, dilation);
        }
        break;
        default:
            ARM_COMPUTE_ERROR("Unsupported DepthwiseConvolutionFunction");
    }
}

Status CLDepthwiseConvolutionLayer::validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info,
                                             unsigned int depth_multiplier, ActivationLayerInfo act_info, const Size2D &dilation)
{
    const GPUTarget              gpu_target      = CLScheduler::get().target();
    DepthwiseConvolutionFunction depth_conv_func = get_depthwiseconvolution_function(input, weights, biases, output, conv_info, depth_multiplier, act_info, dilation, gpu_target);
    switch(depth_conv_func)
    {
        case DepthwiseConvolutionFunction::OPTIMIZED:
            return CLDepthwiseConvolutionLayerInternal3x3::validate(input, weights, biases, output, conv_info, depth_multiplier, act_info, gpu_target, dilation);
        case DepthwiseConvolutionFunction::GENERIC:
            return CLDepthwiseConvolutionLayerGeneric::validate(input, weights, biases, output, conv_info, depth_multiplier, act_info, dilation);
        default:
            ARM_COMPUTE_ERROR("Unsupported DepthwiseConvolutionFunction");
    }
}

DepthwiseConvolutionFunction CLDepthwiseConvolutionLayer::get_depthwiseconvolution_function(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output,
                                                                                            const PadStrideInfo &conv_info,
                                                                                            unsigned int depth_multiplier, ActivationLayerInfo act_info, const Size2D &dilation, GPUTarget gpu_target)
{
    if(bool(CLDepthwiseConvolutionLayerInternal3x3::validate(input, weights, biases, output, conv_info, depth_multiplier, act_info, gpu_target, dilation)) && (is_data_type_float(input->data_type())
                                                                                                                                                               || get_arch_from_target(gpu_target) == GPUTarget::MIDGARD))
    {
        return DepthwiseConvolutionFunction::OPTIMIZED;
    }
    else
    {
        return DepthwiseConvolutionFunction::GENERIC;
    }
}
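// Editorial aside (not part of the upstream file): the heuristic above picks
// the specialised 3x3 path only when its validate() passes and either the
// data type is float or the target is a Midgard GPU. For example, an F32 3x3
// stride-1 depthwise convolution dispatches to OPTIMIZED, while a per-channel
// quantized one on a non-Midgard target falls back to GENERIC.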

void CLDepthwiseConvolutionLayer::run()
{
    switch(_depth_conv_func)
    {
        case DepthwiseConvolutionFunction::OPTIMIZED:
            _func_3x3.run();
            break;
        case DepthwiseConvolutionFunction::GENERIC:
            _func_generic.run();
            break;
        default:
            ARM_COMPUTE_ERROR("DepthwiseConvolutionFunction not properly configured");
    }
}

void CLDepthwiseConvolutionLayer::prepare()
{
    switch(_depth_conv_func)
    {
        case DepthwiseConvolutionFunction::OPTIMIZED:
            _func_3x3.prepare();
            break;
        case DepthwiseConvolutionFunction::GENERIC:
            _func_generic.prepare();
            break;
        default:
            ARM_COMPUTE_ERROR("DepthwiseConvolutionFunction not properly configured");
    }
}
} // namespace arm_compute
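
A minimal usage sketch (an editorial addition, not part of the upstream file): it assumes an OpenCL-capable device and float NHWC tensors with illustrative shapes, and omits input/weight filling and error handling.

#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "arm_compute/runtime/CL/CLTensor.h"
#include "arm_compute/runtime/CL/functions/CLDepthwiseConvolutionLayer.h"

using namespace arm_compute;

int main()
{
    // Create the OpenCL context and queue used by all CL functions.
    CLScheduler::get().default_init();

    // NHWC shapes are stored lowest dimension first: [C, W, H, N].
    // Input: 1 x 56 x 56 x 32; weights: 3x3 depthwise, depth multiplier 1.
    TensorInfo input_info(TensorShape(32U, 56U, 56U, 1U), 1, DataType::F32);
    TensorInfo weights_info(TensorShape(32U, 3U, 3U), 1, DataType::F32);
    TensorInfo output_info(TensorShape(32U, 56U, 56U, 1U), 1, DataType::F32);
    input_info.set_data_layout(DataLayout::NHWC);
    weights_info.set_data_layout(DataLayout::NHWC);
    output_info.set_data_layout(DataLayout::NHWC);

    CLTensor input, weights, output;
    input.allocator()->init(input_info);
    weights.allocator()->init(weights_info);
    output.allocator()->init(output_info);

    // 3x3 kernel, stride 1, pad 1 (SAME); no bias. With F32 data this
    // dispatches to the OPTIMIZED 3x3 path selected above.
    CLDepthwiseConvolutionLayer dwc;
    dwc.configure(&input, &weights, nullptr, &output, PadStrideInfo(1, 1, 1, 1));

    // Allocate backing CL buffers after configure().
    input.allocator()->allocate();
    weights.allocator()->allocate();
    output.allocator()->allocate();

    // Fill input and weights here (e.g. via map()/unmap()), then execute.
    dwc.run();
    CLScheduler::get().sync();
    return 0;
}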