Compute Library 20.08
NEDepthwiseConvolutionAssemblyDispatch.cpp
/*
 * Copyright (c) 2019-2020 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "arm_compute/core/NEON/kernels/assembly/NEDepthwiseConvolutionAssemblyKernelWrapper.h"
#include "arm_compute/core/NEON/kernels/convolution/depthwise/depthwise_dilated.hpp"
#include "arm_compute/core/NEON/kernels/convolution/depthwise/depthwise_quantized_dilated.hpp"
#include "arm_compute/core/Utils.h"

#include <set>

namespace arm_compute
{
namespace
{
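// Helper factories for the templated assembly convolvers. The integer template arguments of
// the Dilated/QAsymm8/QSymm8 depthwise convolution classes are, in order: output tile rows,
// output tile cols, kernel rows, kernel cols, stride rows, stride cols. A nullptr return
// means there is no assembly implementation for the requested kernel size / stride pair.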
std::unique_ptr<depthwise::IDepthwiseConvolution> get_qasymm8_convolver(int kernel_size, int stride_x,
                                                                        int n_batches, int in_rows, int in_cols, int n_channels,
                                                                        int dilation_factor, neon_convolution_kernels::ActivationFunction activation,
                                                                        const qasymm8::QAsymm8Params &wqinfo, const qasymm8::QAsymm8Params &iqinfo, const qasymm8::QAsymm8Params &oqinfo,
                                                                        const qasymm8::QAsymm8RescaleParams &rescale_params,
                                                                        int padding_top, int padding_left, int padding_bottom, int padding_right)
{
    switch(kernel_size)
    {
        case 3:
        {
            switch(stride_x)
            {
                case 1:
                    return arm_compute::support::cpp14::make_unique<depthwise::QAsymm8DilatedDepthwiseConvolution<2, 2, 3, 3, 1, 1>>(
                               n_batches, in_rows, in_cols, n_channels, dilation_factor, activation, wqinfo, iqinfo, oqinfo, rescale_params, padding_top, padding_left, padding_bottom, padding_right);
                case 2:
                    return arm_compute::support::cpp14::make_unique<depthwise::QAsymm8DilatedDepthwiseConvolution<2, 2, 3, 3, 2, 2>>(
                               n_batches, in_rows, in_cols, n_channels, dilation_factor, activation, wqinfo, iqinfo, oqinfo, rescale_params, padding_top, padding_left, padding_bottom, padding_right);
                default:
                    return nullptr;
            }
        }
        case 5:
        {
            switch(stride_x)
            {
                case 1:
                    return arm_compute::support::cpp14::make_unique<depthwise::QAsymm8DilatedDepthwiseConvolution<2, 2, 5, 5, 1, 1>>(
                               n_batches, in_rows, in_cols, n_channels, dilation_factor, activation, wqinfo, iqinfo, oqinfo, rescale_params, padding_top, padding_left, padding_bottom, padding_right);
                case 2:
                    return arm_compute::support::cpp14::make_unique<depthwise::QAsymm8DilatedDepthwiseConvolution<2, 2, 5, 5, 2, 2>>(
                               n_batches, in_rows, in_cols, n_channels, dilation_factor, activation, wqinfo, iqinfo, oqinfo, rescale_params, padding_top, padding_left, padding_bottom, padding_right);
                default:
                    return nullptr;
            }
        }
        default:
            return nullptr;
    }
}

std::unique_ptr<depthwise::IDepthwiseConvolution> get_qsymm8_perchannel_convolver(int kernel_size, int stride_x,
                                                                                  int n_batches, int in_rows, int in_cols, int n_channels,
                                                                                  neon_convolution_kernels::ActivationFunction activation,
                                                                                  const qsymm8::QSymm8PerChannelParams &wqinfo, const qasymm8::QAsymm8Params &iqinfo, const qasymm8::QAsymm8Params &oqinfo,
                                                                                  const qsymm8::QSymm8PerChannelRescaleParams &rescale_params,
                                                                                  int padding_top, int padding_left, int padding_bottom, int padding_right)
{
    switch(kernel_size)
    {
        case 3:
        {
            switch(stride_x)
            {
                case 1:
                    return arm_compute::support::cpp14::make_unique<depthwise::QSymm8HybridPerChannelDepthwiseConvolution<2, 2, 3, 3, 1, 1>>(
                               n_batches, in_rows, in_cols, n_channels, activation, wqinfo, iqinfo, oqinfo, rescale_params, padding_top, padding_left, padding_bottom, padding_right);
                case 2:
                    return arm_compute::support::cpp14::make_unique<depthwise::QSymm8HybridPerChannelDepthwiseConvolution<2, 2, 3, 3, 2, 2>>(
                               n_batches, in_rows, in_cols, n_channels, activation, wqinfo, iqinfo, oqinfo, rescale_params, padding_top, padding_left, padding_bottom, padding_right);
                default:
                    return nullptr;
            }
        }
        case 5:
        {
            switch(stride_x)
            {
                case 1:
                    return arm_compute::support::cpp14::make_unique<depthwise::QSymm8HybridPerChannelDepthwiseConvolution<2, 2, 5, 5, 1, 1>>(
                               n_batches, in_rows, in_cols, n_channels, activation, wqinfo, iqinfo, oqinfo, rescale_params, padding_top, padding_left, padding_bottom, padding_right);
                case 2:
                    return arm_compute::support::cpp14::make_unique<depthwise::QSymm8HybridPerChannelDepthwiseConvolution<2, 2, 5, 5, 2, 2>>(
                               n_batches, in_rows, in_cols, n_channels, activation, wqinfo, iqinfo, oqinfo, rescale_params, padding_top, padding_left, padding_bottom, padding_right);
                default:
                    return nullptr;
            }
        }
        default:
            return nullptr;
    }
}

#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
std::unique_ptr<depthwise::IDepthwiseConvolution> get_fp16_convolver(int kernel_size, int stride_x,
                                                                     int n_batches, int in_rows, int in_cols, int n_channels,
                                                                     int dilation_factor, neon_convolution_kernels::ActivationFunction activation,
                                                                     int padding_top, int padding_left, int padding_bottom, int padding_right)
{
    switch(kernel_size)
    {
        case 3:
        {
            switch(stride_x)
            {
                case 1:
                    return arm_compute::support::cpp14::make_unique<depthwise::DilatedDepthwiseConvolution<3, 3, 3, 3, 1, 1, float16_t, float16_t, float16_t>>(
                               n_batches, in_rows, in_cols, n_channels, dilation_factor, activation, padding_top, padding_left, padding_bottom, padding_right);
                case 2:
                    return arm_compute::support::cpp14::make_unique<depthwise::DilatedDepthwiseConvolution<3, 3, 3, 3, 2, 2, float16_t, float16_t, float16_t>>(
                               n_batches, in_rows, in_cols, n_channels, dilation_factor, activation, padding_top, padding_left, padding_bottom, padding_right);
                default:
                    return nullptr;
            }
        }
        case 5:
        {
            switch(stride_x)
            {
                case 1:
                    return arm_compute::support::cpp14::make_unique<depthwise::DilatedDepthwiseConvolution<3, 3, 5, 5, 1, 1, float16_t, float16_t, float16_t>>(
                               n_batches, in_rows, in_cols, n_channels, dilation_factor, activation, padding_top, padding_left, padding_bottom, padding_right);
                case 2:
                    return arm_compute::support::cpp14::make_unique<depthwise::DilatedDepthwiseConvolution<3, 3, 5, 5, 2, 2, float16_t, float16_t, float16_t>>(
                               n_batches, in_rows, in_cols, n_channels, dilation_factor, activation, padding_top, padding_left, padding_bottom, padding_right);
                default:
                    return nullptr;
            }
        }
        default:
            return nullptr;
    }
}
#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC

std::unique_ptr<depthwise::IDepthwiseConvolution> get_fp32_convolver(int kernel_size, int stride_x,
                                                                     int n_batches, int in_rows, int in_cols, int n_channels,
                                                                     int dilation_factor, neon_convolution_kernels::ActivationFunction activation,
                                                                     int padding_top, int padding_left, int padding_bottom, int padding_right)
{
    switch(kernel_size)
    {
        case 3:
        {
            switch(stride_x)
            {
                case 1:
                    return arm_compute::support::cpp14::make_unique<depthwise::DilatedDepthwiseConvolution<4, 4, 3, 3, 1, 1, float, float, float>>(
                               n_batches, in_rows, in_cols, n_channels, dilation_factor, activation, padding_top, padding_left, padding_bottom, padding_right);
                case 2:
                    return arm_compute::support::cpp14::make_unique<depthwise::DilatedDepthwiseConvolution<3, 3, 3, 3, 2, 2, float, float, float>>(
                               n_batches, in_rows, in_cols, n_channels, dilation_factor, activation, padding_top, padding_left, padding_bottom, padding_right);
                default:
                    return nullptr;
            }
        }
        case 5:
        {
            switch(stride_x)
            {
                case 1:
                    return arm_compute::support::cpp14::make_unique<depthwise::DilatedDepthwiseConvolution<4, 4, 5, 5, 1, 1, float, float, float>>(
                               n_batches, in_rows, in_cols, n_channels, dilation_factor, activation, padding_top, padding_left, padding_bottom, padding_right);
                case 2:
                    return arm_compute::support::cpp14::make_unique<depthwise::DilatedDepthwiseConvolution<3, 3, 5, 5, 2, 2, float, float, float>>(
                               n_batches, in_rows, in_cols, n_channels, dilation_factor, activation, padding_top, padding_left, padding_bottom, padding_right);
                default:
                    return nullptr;
            }
        }
        default:
            return nullptr;
    }
}
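
// Note how the output tile size tracks data type and stride in the factories above: the FP32
// path uses 4x4 output tiles at stride 1 and 3x3 tiles at stride 2, the FP16 path uses 3x3
// tiles, and the two quantized paths use 2x2 tiles.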

std::unique_ptr<depthwise::IDepthwiseConvolution> create_convolver(const ITensor      *input,
                                                                   const ITensor      *weights,
                                                                   ITensor            *output,
                                                                   PadStrideInfo       conv_info,
                                                                   ActivationLayerInfo act_info,
                                                                   const Size2D       &dilation)
{
    const DataType    data_type = input->info()->data_type();
    const TensorShape shape     = input->info()->tensor_shape();

    const int n_batches       = shape[3];
    const int in_rows         = shape.z();
    const int in_cols         = shape.y();
    const int n_channels      = shape.x();
    const int dilation_factor = dilation.x();
    const int padding_top     = conv_info.pad_top();
    const int padding_left    = conv_info.pad_left();
    const int padding_bottom  = conv_info.pad_bottom();
    const int padding_right   = conv_info.pad_right();

    const bool is_uniform_quantized    = (data_type == DataType::QASYMM8) && (weights->info()->data_type() == DataType::QASYMM8);
    const bool is_perchannel_quantized = (data_type == DataType::QASYMM8) && (weights->info()->data_type() == DataType::QSYMM8_PER_CHANNEL);

    const unsigned int stride_x    = conv_info.stride().first;
    const unsigned int kernel_size = weights->info()->tensor_shape().y();

    // Map activation function
    neon_convolution_kernels::ActivationFunction activation = neon_convolution_kernels::ActivationFunction::None;
    if(arm_compute::utils::info_helpers::is_relu(act_info))
    {
        activation = neon_convolution_kernels::ActivationFunction::ReLU;
    }
    else if(arm_compute::utils::info_helpers::is_relu6(act_info))
    {
        activation = neon_convolution_kernels::ActivationFunction::ReLU6;
    }

    // Create quantized convolver
    if(is_uniform_quantized)
    {
        const UniformQuantizationInfo input_qinfo   = input->info()->quantization_info().uniform();
        const UniformQuantizationInfo weights_qinfo = weights->info()->quantization_info().uniform();
        const UniformQuantizationInfo output_qinfo  = output->info()->quantization_info().uniform();

        // Check that the quantization offsets are in the range [0, 255]
        ARM_COMPUTE_ERROR_ON(input_qinfo.offset < 0 || input_qinfo.offset > 255);
        ARM_COMPUTE_ERROR_ON(weights_qinfo.offset < 0 || weights_qinfo.offset > 255);
        ARM_COMPUTE_ERROR_ON(output_qinfo.offset < 0 || output_qinfo.offset > 255);
        const qasymm8::QAsymm8Params iqinfo{ static_cast<uint8_t>(input_qinfo.offset), input_qinfo.scale };
        const qasymm8::QAsymm8Params wqinfo{ static_cast<uint8_t>(weights_qinfo.offset), weights_qinfo.scale };
        const qasymm8::QAsymm8Params oqinfo{ static_cast<uint8_t>(output_qinfo.offset), output_qinfo.scale };

        // Calculate rescale parameters
        const float fmultipler  = iqinfo.scale * wqinfo.scale / oqinfo.scale;
        int32_t     qmultiplier = 0;
        int32_t     qshift      = 0;
        quantization::calculate_quantized_multiplier_less_than_one(fmultipler, &qmultiplier, &qshift);
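        // calculate_quantized_multiplier_less_than_one() splits the real multiplier into a Q0.31
        // fixed-point multiplier and a right shift, i.e. fmultipler ~= qmultiplier * 2^-31 * 2^-qshift.
        // Illustrative example (assumed scales, not taken from this file): iqinfo.scale = 0.02,
        // wqinfo.scale = 0.005 and oqinfo.scale = 0.1 give fmultipler = 0.001 = 0.512 * 2^-9,
        // so qshift = 9 and qmultiplier = round(0.512 * 2^31) = 1099511628.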
        qasymm8::QAsymm8RescaleParams rescale_params(qshift, qmultiplier, fmultipler);

        return get_qasymm8_convolver(kernel_size, stride_x, n_batches, in_rows, in_cols, n_channels, dilation_factor, activation,
                                     wqinfo, iqinfo, oqinfo, rescale_params, padding_top, padding_left, padding_bottom, padding_right);
    }
    else if(is_perchannel_quantized)
    {
        const UniformQuantizationInfo input_qinfo   = input->info()->quantization_info().uniform();
        const QuantizationInfo        weights_qinfo = weights->info()->quantization_info();
        const UniformQuantizationInfo output_qinfo  = output->info()->quantization_info().uniform();

        // Check that the quantization offsets are in the range [0, 255]
        ARM_COMPUTE_ERROR_ON(input_qinfo.offset < 0 || input_qinfo.offset > 255);
        ARM_COMPUTE_ERROR_ON(output_qinfo.offset < 0 || output_qinfo.offset > 255);
        const qasymm8::QAsymm8Params         iqinfo{ static_cast<uint8_t>(input_qinfo.offset), input_qinfo.scale };
        const qsymm8::QSymm8PerChannelParams wqinfo{ weights_qinfo.scale() };
        const qasymm8::QAsymm8Params         oqinfo{ static_cast<uint8_t>(output_qinfo.offset), output_qinfo.scale };

        // Calculate rescale parameters
        std::vector<float>   fmultipliers;
        std::vector<int32_t> qmultipliers;
        std::vector<int32_t> qshifts;

        for(auto const s : wqinfo.scales)
        {
            const float fmultipler  = iqinfo.scale * s / oqinfo.scale;
            int32_t     qmultiplier = 0;
            int32_t     qshift      = 0;
            quantization::calculate_quantized_multiplier_less_than_one(fmultipler, &qmultiplier, &qshift);
            fmultipliers.push_back(fmultipler);
            qmultipliers.push_back(qmultiplier);
            qshifts.push_back(qshift);
        }

        qsymm8::QSymm8PerChannelRescaleParams rescale_params(qshifts, qmultipliers, fmultipliers);

        return get_qsymm8_perchannel_convolver(kernel_size, stride_x, n_batches, in_rows, in_cols, n_channels, activation,
                                               wqinfo, iqinfo, oqinfo, rescale_params, padding_top, padding_left, padding_bottom, padding_right);
    }
    else
    {
        // Create float convolver
        switch(data_type)
        {
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
            case DataType::F16:
            {
                return get_fp16_convolver(kernel_size, stride_x, n_batches, in_rows, in_cols, n_channels, dilation_factor, activation, padding_top, padding_left, padding_bottom, padding_right);
            }
#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
            case DataType::F32:
            {
                return get_fp32_convolver(kernel_size, stride_x, n_batches, in_rows, in_cols, n_channels, dilation_factor, activation, padding_top, padding_left, padding_bottom, padding_right);
            }
            default:
                return nullptr;
        }
    }
}
} // namespace

struct NEDepthwiseConvolutionAssemblyDispatch::LocalImpl
{
    std::unique_ptr<depthwise::IDepthwiseConvolution> _dwc_assembly_kernel{ nullptr };
    NEDepthwiseConvolutionAssemblyKernelWrapper       _dwc_acl_kernel{};
};
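// Keeping the assembly convolver and its kernel wrapper behind this pImpl struct means the
// depthwise convolution headers included above do not have to appear in the public
// NEDepthwiseConvolutionAssemblyDispatch header.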

#ifndef DOXYGEN_SKIP_THIS
NEDepthwiseConvolutionAssemblyDispatch::NEDepthwiseConvolutionAssemblyDispatch(std::shared_ptr<arm_compute::IMemoryManager> memory_manager)
    : _memory_group(std::move(memory_manager)), _input(nullptr), _weights(nullptr), _bias(nullptr), _output(nullptr), _packed_weights(), _workspace(), _is_prepared(false),
      _pImpl(support::cpp14::make_unique<LocalImpl>())
{
}
#endif /* DOXYGEN_SKIP_THIS */

void NEDepthwiseConvolutionAssemblyDispatch::configure(const ITensor             *input,
                                                       const ITensor             *weights,
                                                       const ITensor             *bias,
                                                       ITensor                   *output,
                                                       const PadStrideInfo       &conv_info,
                                                       unsigned int               depth_multiplier,
                                                       const ActivationLayerInfo &act_info,
                                                       const Size2D              &dilation)
{
    ARM_COMPUTE_UNUSED(depth_multiplier);
    ARM_COMPUTE_ERROR_THROW_ON(NEDepthwiseConvolutionAssemblyDispatch::validate(input->info(),
                                                                                weights->info(),
                                                                                bias != nullptr ? bias->info() : nullptr,
                                                                                output->info(),
                                                                                conv_info,
                                                                                depth_multiplier,
                                                                                act_info,
                                                                                dilation));

    // Output auto initialization if not yet initialized
    const TensorShape output_shape = misc::shape_calculator::compute_depthwise_convolution_shape(*input->info(), *weights->info(), conv_info, depth_multiplier, dilation);
    auto_init_if_empty(*output->info(), input->info()->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(output_shape).set_quantization_info(output->info()->quantization_info()));

    _input       = input;
    _weights     = weights;
    _bias        = bias;
    _output      = output;
    _is_prepared = false;

    // Create convolver
    _pImpl->_dwc_assembly_kernel = create_convolver(input, weights, output, conv_info, act_info, dilation);
    ARM_COMPUTE_ERROR_ON(_pImpl->_dwc_assembly_kernel == nullptr);

    // Create assembly kernel wrapper
    _pImpl->_dwc_acl_kernel.configure(_pImpl->_dwc_assembly_kernel.get());

    constexpr size_t alignment = 128;

    // Create workspace
    const unsigned int num_threads    = NEScheduler::get().num_threads();
    const size_t       workspace_size = _pImpl->_dwc_assembly_kernel->get_working_space_size(num_threads);
    ARM_COMPUTE_ERROR_ON_MSG(workspace_size == 0, "Workspace size cannot be 0 !");
    _workspace.allocator()->init(TensorInfo(TensorShape{ workspace_size }, 1, DataType::S8), alignment);
    _memory_group.manage(&_workspace);
    _workspace.allocator()->allocate();
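    // The workspace is sized for the maximum number of threads the scheduler may use and is
    // managed by the memory group, so its backing memory can be reused by other functions in
    // the same group outside of run().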

    // Create packing tensor
    const size_t pack_tensor_size = _pImpl->_dwc_assembly_kernel->get_packed_params_size();
    ARM_COMPUTE_ERROR_ON_MSG(pack_tensor_size == 0, "Pack tensor size cannot be 0 !");
    _packed_weights.allocator()->init(TensorInfo(TensorShape{ pack_tensor_size }, 1, DataType::S8), alignment);
}

Status NEDepthwiseConvolutionAssemblyDispatch::validate(const ITensorInfo         *input,
                                                        const ITensorInfo         *weights,
                                                        const ITensorInfo         *bias,
                                                        const ITensorInfo         *output,
                                                        const PadStrideInfo       &conv_info,
                                                        unsigned int               depth_multiplier,
                                                        const ActivationLayerInfo &act_info,
                                                        const Size2D              &dilation)
{
    ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(input);
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F16, DataType::F32, DataType::QASYMM8);
    if(weights->data_type() != DataType::QSYMM8_PER_CHANNEL)
    {
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, weights);
    }
    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_LAYOUT(input, weights);

    // Validate convolver
    ARM_COMPUTE_RETURN_ERROR_ON(!is_optimized_supported(input, weights, conv_info, depth_multiplier, dilation));

    // Validate activation
    const bool is_relu  = arm_compute::utils::info_helpers::is_relu(act_info);
    const bool is_relu6 = arm_compute::utils::info_helpers::is_relu6(act_info);
    ARM_COMPUTE_RETURN_ERROR_ON(act_info.enabled() && !(is_relu || is_relu6));

    // Check bias
    if(bias != nullptr)
    {
        unsigned int channel_idx = get_data_layout_dimension_index(input->data_layout(), DataLayoutDimension::CHANNEL);
        ARM_COMPUTE_RETURN_ERROR_ON(bias->num_dimensions() > 1);
        ARM_COMPUTE_RETURN_ERROR_ON(bias->dimension(0) != weights->dimension(channel_idx));
    }

    // Check output
    if(output->total_size() != 0)
    {
        const TensorShape output_shape = misc::shape_calculator::compute_depthwise_convolution_shape(*input, *weights, conv_info, depth_multiplier, dilation);
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(output->tensor_shape(), output_shape);
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
    }

    // The uniform quantization case will only have 1 scale value in the weights quantization info
    const UniformQuantizationInfo input_qinfo   = input->quantization_info().uniform();
    const QuantizationInfo        weights_qinfo = weights->quantization_info();
    const UniformQuantizationInfo output_qinfo  = output->quantization_info().uniform();
    for(auto const s : weights_qinfo.scale())
    {
        const float fmultipler = input_qinfo.scale * s / output_qinfo.scale;
        ARM_COMPUTE_RETURN_ERROR_ON(fmultipler > 1.f);
    }

    return Status{};
}

bool NEDepthwiseConvolutionAssemblyDispatch::is_optimized_supported(const ITensorInfo *input,
                                                                    const ITensorInfo *weights,
                                                                    PadStrideInfo      conv_info,
                                                                    unsigned int       depth_multiplier,
                                                                    const Size2D      &dilation)
{
    // Reshape input shape if in NHWC format
    const DataLayout data_layout = input->data_layout();
    TensorShape      in_shape{ input->tensor_shape() };
    if(data_layout == DataLayout::NHWC)
    {
        in_shape.set(Window::DimX, input->tensor_shape().y());
        in_shape.set(Window::DimY, input->tensor_shape().z());
        in_shape.set(Window::DimZ, input->tensor_shape().x());
    }
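    // At this point in_shape is in NCHW dimension order (W, H, C, N), which matches the
    // DataLayout::NCHW argument passed to calculate_same_pad() below.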

    // Check data type
    // TODO (COMPMID-3004): Add assembly optimized routine for QASYMM8_SIGNED NEDepthwiseConvolutionLayer
    const DataType input_type          = input->data_type();
    const bool     is_input_type_valid = is_data_type_float(input_type) || input_type == DataType::QASYMM8;
    const DataType weights_type          = weights->data_type();
    const bool     is_weights_type_valid = is_data_type_float(weights_type) || weights_type == DataType::QASYMM8 || weights_type == DataType::QASYMM8_SIGNED
                                           || weights_type == DataType::QSYMM8_PER_CHANNEL;

    // Check weights size
    std::set<unsigned int> supported_kernel_sizes = { 3, 5 };
    const unsigned int     width_idx  = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
    const unsigned int     height_idx = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
    const unsigned int     kernel_w   = weights->dimension(width_idx);
    const unsigned int     kernel_h   = weights->dimension(height_idx);
    bool                   weights_supported = (kernel_w == kernel_h) && (supported_kernel_sizes.count(kernel_w) != 0);

    // Check for supported strides
    const auto &strides           = conv_info.stride();
    bool        supported_strides = (strides.first == strides.second) && ((strides.first == 1) || (strides.first == 2));

    // Check for supported padding
    const auto    pad_top    = conv_info.pad_top();
    const auto    pad_right  = conv_info.pad_right();
    const auto    pad_bottom = conv_info.pad_bottom();
    const auto    pad_left   = conv_info.pad_left();
    PadStrideInfo same_pad   = calculate_same_pad(in_shape, TensorShape(kernel_w, kernel_h), conv_info, DataLayout::NCHW, dilation);
    bool          is_same_padding  = (pad_top == same_pad.pad_top()) && (pad_right == same_pad.pad_right()) && (pad_bottom == same_pad.pad_bottom()) && (pad_left == same_pad.pad_left());
    bool          is_valid_padding = (pad_top == 0) && (pad_right == 0) && (pad_bottom == 0) && (pad_left == 0);
    bool          supported_padding = is_same_padding || is_valid_padding;
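    // For example, a 3x3 kernel at stride 1 with unit dilation has SAME padding of 1 on every
    // border, while VALID padding means no padding at all.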
    // TODO(COMPMID-2464): Enable once dilated conv with stride 2 is supported
    bool is_dilation_supported = ((dilation == Size2D(1U, 1U)) || ((dilation.x() == dilation.y()) && strides.first == 1));

    if(weights_type == DataType::QSYMM8_PER_CHANNEL)
    {
        is_dilation_supported = is_dilation_supported && (dilation == Size2D(1U, 1U));
    }

    return is_input_type_valid && is_weights_type_valid && weights_supported && supported_strides && supported_padding && (depth_multiplier == 1) && is_dilation_supported;
}

void NEDepthwiseConvolutionAssemblyDispatch::run()
{
    // Prepare assembly kernel
    prepare();

    MemoryGroupResourceScope scope_mg(_memory_group);

    // Setup inputs/outputs
    ARM_COMPUTE_ERROR_ON(_workspace.buffer() == nullptr);
    _pImpl->_dwc_assembly_kernel->set_working_space(static_cast<void *>(_workspace.buffer()));

    ARM_COMPUTE_ERROR_ON(_input->buffer() == nullptr);
    const int   input_element_size = _input->info()->element_size();
    const int   input_batch_stride = _input->info()->strides_in_bytes()[3] / input_element_size;
    const int   input_row_stride   = _input->info()->strides_in_bytes().z() / input_element_size;
    const int   input_col_stride   = _input->info()->strides_in_bytes().y() / input_element_size;
    const void *input_ptr          = _input->buffer() + _input->info()->offset_first_element_in_bytes();
    _pImpl->_dwc_assembly_kernel->set_input(input_ptr, input_batch_stride, input_row_stride, input_col_stride);

    ARM_COMPUTE_ERROR_ON(_output->buffer() == nullptr);
    const int output_element_size = _output->info()->element_size();
    const int output_batch_stride = _output->info()->strides_in_bytes()[3] / output_element_size;
    const int output_row_stride   = _output->info()->strides_in_bytes().z() / output_element_size;
    const int output_col_stride   = _output->info()->strides_in_bytes().y() / output_element_size;
    void     *output_ptr          = _output->buffer() + _output->info()->offset_first_element_in_bytes();
    _pImpl->_dwc_assembly_kernel->set_output(output_ptr, output_batch_stride, output_row_stride, output_col_stride);

    // Schedule assembly kernel
    NEScheduler::get().schedule(&_pImpl->_dwc_acl_kernel, Window::DimX);
}

void NEDepthwiseConvolutionAssemblyDispatch::prepare()
{
    if(!_is_prepared)
    {
        _packed_weights.allocator()->allocate();
        ARM_COMPUTE_ERROR_ON(_packed_weights.buffer() == nullptr);

        // Pack weights and bias
        const int weights_element_size = _weights->info()->element_size();
        const int weights_row_stride   = _weights->info()->strides_in_bytes().z() / weights_element_size;
        const int weights_col_stride   = _weights->info()->strides_in_bytes().y() / weights_element_size;
        _pImpl->_dwc_assembly_kernel->pack_params(_packed_weights.buffer(),
                                                  _weights->buffer() + _weights->info()->offset_first_element_in_bytes(),
                                                  weights_row_stride,
                                                  weights_col_stride,
                                                  (_bias != nullptr) ? _bias->buffer() : nullptr);
        _pImpl->_dwc_assembly_kernel->set_packed_params_buffer(_packed_weights.buffer());

        _weights->mark_as_unused();
        if(_bias != nullptr)
        {
            _bias->mark_as_unused();
        }
        _is_prepared = true;
    }
}
} // namespace arm_compute
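
Typical usage of the dispatch function follows the standard NEON function pattern: check support with validate(), configure() once, allocate the tensors, then call run() per inference (the first run() packs the weights via prepare()). The sketch below is illustrative only and is not part of the file above; the include path, tensor shapes and parameters are assumptions (NHWC FP32, 1x56x56x64 input, 3x3 depthwise kernel, stride 1, SAME padding).

// Minimal usage sketch (assumed values; error handling and data filling omitted).
#include "arm_compute/runtime/NEON/functions/assembly/NEDepthwiseConvolutionAssemblyDispatch.h"
#include "arm_compute/runtime/Tensor.h"

using namespace arm_compute;

void example_depthwise_3x3()
{
    // NHWC tensors: TensorShape is ordered (C, W, H, N).
    TensorInfo src_info(TensorShape(64U, 56U, 56U, 1U), 1, DataType::F32);
    TensorInfo wei_info(TensorShape(64U, 3U, 3U), 1, DataType::F32);
    TensorInfo bia_info(TensorShape(64U), 1, DataType::F32);
    TensorInfo dst_info(TensorShape(64U, 56U, 56U, 1U), 1, DataType::F32);
    src_info.set_data_layout(DataLayout::NHWC);
    wei_info.set_data_layout(DataLayout::NHWC);
    dst_info.set_data_layout(DataLayout::NHWC);

    Tensor src, weights, bias, dst;
    src.allocator()->init(src_info);
    weights.allocator()->init(wei_info);
    bias.allocator()->init(bia_info);
    dst.allocator()->init(dst_info);

    // 3x3 kernel, stride 1, SAME padding (1 pixel on each border).
    const PadStrideInfo conv_info(1, 1, 1, 1);

    NEDepthwiseConvolutionAssemblyDispatch dwc;
    const Status status = NEDepthwiseConvolutionAssemblyDispatch::validate(src.info(), weights.info(), bias.info(), dst.info(), conv_info);
    if(status.error_code() == ErrorCode::OK)
    {
        dwc.configure(&src, &weights, &bias, &dst, conv_info);

        src.allocator()->allocate();
        weights.allocator()->allocate();
        bias.allocator()->allocate();
        dst.allocator()->allocate();
        // ... fill src, weights and bias with data ...

        dwc.run(); // first call packs the weights (prepare()), then executes the assembly kernel
    }
}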