Compute Library 22.05
CpuGemmConv2d.cpp
/*
 * Copyright (c) 2021-2022 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "src/cpu/operators/CpuGemmConv2d.h"

#include "arm_compute/core/Size2D.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Utils.h"
#include "arm_compute/core/Validate.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "arm_compute/core/utils/quantization/AsymmHelpers.h"
#include "arm_compute/runtime/NEON/NEScheduler.h"

#include "src/common/utils/Log.h"
#include "src/core/helpers/MemoryHelpers.h"
#include "src/cpu/kernels/CpuCol2ImKernel.h"
#include "src/cpu/kernels/CpuIm2ColKernel.h"
#include "src/cpu/kernels/CpuReshapeKernel.h"
#include "src/cpu/kernels/CpuWeightsReshapeKernel.h"
#include "src/cpu/operators/CpuGemm.h"
#include "src/cpu/operators/CpuGemmLowpMatrixMultiplyCore.h"
#include "src/cpu/utils/CpuAuxTensorHandler.h"

#include <set>
#include <tuple>

using namespace arm_compute::experimental;

namespace arm_compute
{
namespace cpu
{
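// This operator lowers 2D convolution to a matrix multiply: im2col unrolls each
// receptive field into a row, the reshaped weights provide the other operand, and
// col2im (or a plain reshape) folds the GEMM result back into the output feature map.
// The SkipInfo logic below detects when one or both of those rearrangements can be
// elided (1x1 stride-1 NHWC kernels, or outputs expressible through GEMM3D).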
CpuGemmConv2d::SkipInfo CpuGemmConv2d::skip_im_col_info(const ITensorInfo *src, const ITensorInfo *weights, const PadStrideInfo &conv_info,
                                                        const Size2D &dilation, const ActivationLayerInfo &act_info)
{
    const DataLayout   data_layout   = src->data_layout();
    const int          idx_width     = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
    const int          idx_height    = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
    const unsigned int kernel_width  = weights->dimension(idx_width);
    const unsigned int kernel_height = weights->dimension(idx_height);
    unsigned int       conv_w        = 0;
    unsigned int       conv_h        = 0;
    std::tie(conv_w, conv_h) = scaled_dimensions(src->dimension(idx_width),
                                                 src->dimension(idx_height),
                                                 kernel_width,
                                                 kernel_height,
                                                 conv_info,
                                                 dilation);
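    // With the default FLOOR rounding, scaled_dimensions computes the usual convolution
    // output size, e.g. conv_w = (in_w + pad_l + pad_r - dilation_x * (kernel_w - 1) - 1) / stride_x + 1,
    // so a 32x32 input with a 3x3 kernel, stride 1 and pad 1 stays 32x32.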
    const bool skip_im2col = (data_layout == DataLayout::NHWC && kernel_width == 1 && kernel_height == 1 && conv_info.stride().first == 1 && conv_info.stride().second == 1);
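    // A 1x1 stride-1 kernel in NHWC reads each pixel's channel vector exactly as it is
    // laid out in memory, so the im2col copy would reproduce the input and can be skipped.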

    if(skip_im2col)
    {
        const bool skip_col2im = (data_layout == DataLayout::NHWC && (bool(CpuGemmConv2d::validate_gemm3d(src, weights, act_info, conv_h, /*skip_im2col*/ true))));
        if(skip_col2im)
        {
            return { true, true };
        }
    }
    else
    {
        const bool skip_col2im = (data_layout == DataLayout::NHWC && (bool(CpuGemmConv2d::validate_gemm3d(src, weights, act_info, conv_h, /*skip_im2col*/ false))));
        if(skip_col2im)
        {
            return { false, true };
        }
    }

    // Default case when we cannot reinterpret the input and output as 3D.
    return { false, false };
}

CpuGemmConv2d::CpuGemmConv2d()
    : _weights_reshape_kernel(nullptr), _im2col_kernel(), _mm_gemm(), _mm_gemmlowp(), _col2im_kernel(), _reshape_kernel(), _im2col_output(), _weights_reshaped(), _gemm_output(), _gemm_output_3d(),
      _data_layout(DataLayout::NCHW), _skip_im2col(false), _skip_col2im(false), _is_quantized(false), _is_prepared(false), _aux_mem(AuxTensorIdx::Count)
{
}
CpuGemmConv2d::~CpuGemmConv2d() = default;

void CpuGemmConv2d::configure_mm(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, ITensorInfo *dst, const ActivationLayerInfo &act_info,
                                 bool enable_fast_math, int gemm_3d_depth)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(src, weights);
    ARM_COMPUTE_ERROR_THROW_ON(validate_mm(src, weights, biases, dst, act_info, enable_fast_math, gemm_3d_depth, _skip_im2col));

    // Create GEMMInfo structure
    const GEMMInfo &gemm_info = GEMMInfo(false, false, true /* Reshape weights only for the first run */,
                                         gemm_3d_depth, _skip_im2col /* Reinterpret the input as 3D if im2col is skipped */,
                                         false, GEMMLowpOutputStageInfo(), false, enable_fast_math, false, act_info);

    // Supported activations in GEMM
    const std::set<ActivationLayerInfo::ActivationFunction> supported_acts = { ActivationLayerInfo::ActivationFunction::RELU,
                                                                               ActivationLayerInfo::ActivationFunction::BOUNDED_RELU,
                                                                               ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU
                                                                             };

    if(_is_quantized)
    {
        TensorInfo tmp_src{ *src };
        TensorInfo tmp_weights{ *weights };
        // Convolution needs the operand offsets negated, so override the input and
        // weights QuantizationInfo with the extracted and negated offsets.
        const QuantizationInfo        iqinfo    = src->quantization_info();
        const QuantizationInfo        wqinfo    = weights->quantization_info();
        const QuantizationInfo        oqinfo    = (dst->total_size() == 0) ? iqinfo : dst->quantization_info();
        const UniformQuantizationInfo uiqinfo   = iqinfo.uniform();
        const UniformQuantizationInfo uoqinfo   = oqinfo.uniform();
        const DataType                data_type = src->data_type();

        tmp_src.set_quantization_info(QuantizationInfo(uiqinfo.scale, -uiqinfo.offset));
        if(!is_data_type_quantized_per_channel(tmp_weights.data_type()))
        {
            const UniformQuantizationInfo uwqinfo = wqinfo.uniform();
            tmp_weights.set_quantization_info(QuantizationInfo(uwqinfo.scale, -uwqinfo.offset));
        }
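        // Note (standard asymmetric-quantization reasoning): with real = scale * (q - offset),
        // the product needs terms of the form (q_src - offset_src) * (q_w - offset_w); the
        // GEMMLowp core adds the stored offsets to the operands, so negating them above is
        // what makes it compute exactly those terms.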

        // Merge activation with output stage
        PixelValue type_min{};
        PixelValue type_max{};
        std::tie(type_min, type_max) = get_min_max(data_type);
        int32_t min_activation = type_min.get<int32_t>();
        int32_t max_activation = type_max.get<int32_t>();

        if(supported_acts.count(act_info.activation()) != 0)
        {
            std::tie(min_activation, max_activation) = get_quantized_activation_min_max(act_info, data_type, uoqinfo);
        }

        GEMMLowpOutputStageInfo output_info;
        output_info.type                     = GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT;
        output_info.gemmlowp_offset          = uoqinfo.offset;
        output_info.gemmlowp_min_bound       = min_activation;
        output_info.gemmlowp_max_bound       = max_activation;
        output_info.is_quantized_per_channel = (tmp_weights.data_type() == DataType::QSYMM8_PER_CHANNEL);
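        // The requantization multiplier is M = (scale_src * scale_w) / scale_dst; the helper
        // below decomposes it (per channel if needed) into an integer multiplier and a right
        // shift so the output stage can apply it in fixed-point arithmetic.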
        quantization::calculate_quantized_multipliers(iqinfo, wqinfo, oqinfo, output_info);

        _mm_gemmlowp = std::make_unique<CpuGemmLowpMatrixMultiplyCore>();
        _mm_gemmlowp->configure(&tmp_src, &tmp_weights, biases, dst, GEMMInfo(false, false, true, gemm_3d_depth, _skip_im2col, false, output_info, false, enable_fast_math, false, act_info));

        auto mm_mem_req = _mm_gemmlowp->workspace();
        for(unsigned int cont = 0; cont < mm_mem_req.size(); ++cont)
        {
            _aux_mem[cont] = mm_mem_req[cont];
        }
    }
    else
    {
        // Configure matrix multiply function
        _mm_gemm = std::make_unique<CpuGemm>();
        _mm_gemm->configure(src, weights, biases, dst, 1.0f, 0.0f, gemm_info);
        auto mm_mem_req = _mm_gemm->workspace();
        for(unsigned int cont = 0; cont < mm_mem_req.size(); ++cont)
        {
            _aux_mem[cont] = mm_mem_req[cont];
        }
    }
}

Status CpuGemmConv2d::validate_mm(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *dst,
                                  const ActivationLayerInfo &act_info, bool enable_fast_math, int gemm_3d_depth, bool skip_im2col)
{
    const DataType data_type             = src->data_type();
    const bool     is_quantized          = is_data_type_quantized_asymmetric(data_type);
    const bool     is_activation_enabled = act_info.enabled();

    // Create GEMMInfo structure
    const GEMMInfo gemm_info = GEMMInfo(false, false, true /* Reshape weights only for the first run */,
                                        gemm_3d_depth, skip_im2col /* Reinterpret the input as 3D if im2col is skipped */,
                                        false, GEMMLowpOutputStageInfo(), false, enable_fast_math, false, act_info);

    if(is_quantized)
    {
        // Convolution needs the operand offsets negated, so the input and weights
        // QuantizationInfo are cloned below with extracted and negated offsets.
        const QuantizationInfo        &iqinfo  = src->quantization_info();
        const QuantizationInfo        &wqinfo  = weights->quantization_info();
        const QuantizationInfo        &oqinfo  = (dst->total_size() == 0) ? iqinfo : dst->quantization_info();
        const UniformQuantizationInfo  uoqinfo = oqinfo.uniform();

        // Merge activation with output stage
        PixelValue type_min{};
        PixelValue type_max{};
        std::tie(type_min, type_max) = get_min_max(data_type);
        int32_t min_activation = type_min.get<int32_t>();
        int32_t max_activation = type_max.get<int32_t>();

        const std::set<ActivationLayerInfo::ActivationFunction> supported_acts = { ActivationLayerInfo::ActivationFunction::RELU,
                                                                                   ActivationLayerInfo::ActivationFunction::BOUNDED_RELU,
                                                                                   ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU
                                                                                 };
        if(is_activation_enabled && supported_acts.count(act_info.activation()) != 0)
        {
            std::tie(min_activation, max_activation) = get_quantized_activation_min_max(act_info, data_type, uoqinfo);
        }

        GEMMLowpOutputStageInfo output_info;
        output_info.type                     = GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT;
        output_info.gemmlowp_offset          = uoqinfo.offset;
        output_info.gemmlowp_min_bound       = min_activation;
        output_info.gemmlowp_max_bound       = max_activation;
        output_info.is_quantized_per_channel = (weights->data_type() == DataType::QSYMM8_PER_CHANNEL);
        ARM_COMPUTE_RETURN_ON_ERROR(quantization::calculate_quantized_multipliers(iqinfo, wqinfo, oqinfo, output_info));

        // Perform validation step on GEMMLowp
        std::unique_ptr<ITensorInfo> input_qa   = src->clone();
        std::unique_ptr<ITensorInfo> weights_qa = weights->clone();
        input_qa->set_quantization_info(QuantizationInfo(iqinfo.uniform().scale, -iqinfo.uniform().offset));
        weights_qa->set_quantization_info(QuantizationInfo(wqinfo.uniform().scale, -wqinfo.uniform().offset));
        return CpuGemmLowpMatrixMultiplyCore::validate(input_qa.get(), weights_qa.get(), biases, dst, GEMMInfo(false, false, true, gemm_3d_depth, skip_im2col, false, output_info, false, enable_fast_math,
                                                       false, act_info));
    }
    else
    {
        // Perform validation step on Matrix multiply function
        return CpuGemm::validate(src, weights, nullptr, dst, 1.0f, 0.0f, gemm_info);
    }
}

Status CpuGemmConv2d::validate_gemm3d(const ITensorInfo *input_info, const ITensorInfo *weights_info, const ActivationLayerInfo &act_info, int gemm_3d_depth, bool skip_im2col)
{
    const DataType     data_type = input_info->data_type();
    const unsigned int mult_y    = skip_im2col ? 1U : gemm_3d_depth;
    const unsigned int mult_z    = skip_im2col ? gemm_3d_depth : 1U;

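    // When im2col is skipped the input itself is reinterpreted as 3D, so the dummy
    // input grows in its third dimension (mult_z); otherwise only the GEMM output is
    // 3D and the extra rows go into the second dimension (mult_y).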
    // Set dummy tensor shapes for the validation
    const TensorInfo dummy_input_info(TensorShape(4U, 4U * mult_y, 1U * mult_z), 1, data_type, input_info->quantization_info());
    const TensorInfo dummy_weights_info(TensorShape(4U, 4U), 1, data_type, weights_info->quantization_info());
    const TensorInfo dummy_output_info(TensorShape(4U, 4U, gemm_3d_depth), 1, data_type, input_info->quantization_info());

    return validate_mm(&dummy_input_info, &dummy_weights_info, nullptr, &dummy_output_info, act_info, false, gemm_3d_depth, skip_im2col);
}

void CpuGemmConv2d::configure(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, ITensorInfo *dst, const PadStrideInfo &conv_info, const WeightsInfo &weights_info,
                              const Size2D &dilation, const ActivationLayerInfo &act_info, bool enable_fast_math, unsigned int num_groups)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(src, weights, dst);
    ARM_COMPUTE_UNUSED(num_groups, weights_info);
    ARM_COMPUTE_ERROR_THROW_ON(CpuGemmConv2d::validate(src,
                                                       weights,
                                                       biases,
                                                       dst,
                                                       conv_info,
                                                       weights_info,
                                                       dilation,
                                                       act_info,
                                                       enable_fast_math,
                                                       num_groups));
    ARM_COMPUTE_LOG_PARAMS(src, weights, biases, dst, conv_info, weights_info, dilation, act_info, enable_fast_math, num_groups);

    const DataType   data_type   = src->data_type();
    const DataLayout data_layout = src->data_layout();
    const int        idx_width   = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
    const int        idx_height  = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
    const int        idx_kernels = get_data_layout_dimension_index(data_layout, DataLayoutDimension::BATCHES);

    const unsigned int kernel_width  = weights->dimension(idx_width);
    const unsigned int kernel_height = weights->dimension(idx_height);

    _is_prepared  = weights_info.retain_internal_weights();
    _is_quantized = is_data_type_quantized_asymmetric(src->data_type());
    _data_layout  = data_layout;
    _skip_im2col  = (data_layout == DataLayout::NHWC && kernel_width == 1 && kernel_height == 1 && conv_info.stride().first == 1 && conv_info.stride().second == 1);

    const ITensorInfo *gemm_input_to_use  = src;
    ITensorInfo       *gemm_output_to_use = dst;

    // Get convolved dimensions
    unsigned int conv_w = 0;
    unsigned int conv_h = 0;
    std::tie(conv_w, conv_h) = scaled_dimensions(src->dimension(idx_width),
                                                 src->dimension(idx_height),
                                                 kernel_width,
                                                 kernel_height,
                                                 conv_info,
                                                 dilation);
    ARM_COMPUTE_ERROR_ON_MSG((dst->dimension(idx_width) != conv_w) || (dst->dimension(idx_height) != conv_h),
                             "Output shape does not match the expected one");

    // Check if GEMM3D is supported
    const CpuGemmConv2d::SkipInfo skip_info = CpuGemmConv2d::skip_im_col_info(src, weights, conv_info, dilation, act_info);
    _skip_im2col = skip_info.skip_im2col;
    _skip_col2im = skip_info.skip_col2im;

    // Get parameters from conv_info
    unsigned int stride_x = 0;
    unsigned int stride_y = 0;
    std::tie(stride_x, stride_y) = conv_info.stride();

    unsigned int mat_weights_cols = weights->dimension(idx_kernels);

    // _weights_reshaped will be auto configured in the kernel.
    // Just append biases and do not transpose 1xW as it will be reshaped in CpuGemm
    _weights_reshape_kernel = std::make_unique<kernels::CpuWeightsReshapeKernel>();
    _weights_reshape_kernel->configure(weights, nullptr, &_weights_reshaped);
    _weights_reshaped.set_quantization_info(weights->quantization_info());

    // Create tensor to store im2col reshaped inputs
    if(!_skip_im2col)
    {
        // Configure
        _im2col_kernel = std::make_unique<kernels::CpuIm2ColKernel>();
        _im2col_kernel->configure(src, &_im2col_output, Size2D(kernel_width, kernel_height), conv_info, false, dilation);

        // Update GEMM input
        gemm_input_to_use = &_im2col_output;
    }

    // Create temporary GEMM output tensor in case we cannot skip col2im
    const DataType output_data_type = data_type == DataType::BFLOAT16 ? DataType::F32 : data_type;
    if(!_skip_col2im)
    {
        TensorShape shape_gemm;

        // Calculate GEMM output shape
        shape_gemm = _im2col_output.tensor_shape();
        shape_gemm.set(0, mat_weights_cols);
        shape_gemm.set(1, conv_w * conv_h);

        _gemm_output = TensorInfo(shape_gemm, 1, output_data_type);
        _gemm_output.set_quantization_info(dst->quantization_info()).set_data_layout(src->data_layout());
        _gemm_output_3d = TensorInfo(_gemm_output);

        // Update GEMM output
        gemm_output_to_use = &_gemm_output;
    }
    else
    {
        _gemm_output_3d = TensorInfo(*dst);
        _gemm_output_3d.set_data_type(output_data_type).set_data_layout(src->data_layout()).set_is_resizable(true);
        _gemm_output = TensorInfo(_gemm_output_3d);

        // Update GEMM output
        gemm_output_to_use = &_gemm_output_3d;
    }

    // Configure GEMM
    // In case we need to skip col2im, GEMM3D (gemm_3d_depth != 0) must be called in order to avoid reshaping the output matrix
    const unsigned int gemm_3d_depth = _skip_col2im ? conv_h : 0;
    configure_mm(gemm_input_to_use, &_weights_reshaped, biases, gemm_output_to_use, act_info, enable_fast_math, gemm_3d_depth);

    if(!_skip_col2im && _data_layout == DataLayout::NCHW)
    {
        // Configure col2im
        _col2im_kernel = std::make_unique<kernels::CpuCol2ImKernel>();
        _col2im_kernel->configure(gemm_output_to_use, dst, Size2D(conv_w, conv_h));
    }
    else
    {
        // Configure reshape layer
        _reshape_kernel = std::make_unique<kernels::CpuReshapeKernel>();
        _reshape_kernel->configure(gemm_output_to_use, dst);
    }

    // Check if GEMM transforms weights
    // Modernise through COMPMID-4535
    bool gemm_trans_wei = _aux_mem[1].size > 0; // Asm Pretranspose
    gemm_trans_wei      = _mm_gemm != nullptr ? _aux_mem[3].size > 0 : gemm_trans_wei;     // Transpose RHS
    gemm_trans_wei      = _mm_gemmlowp != nullptr ? _aux_mem[5].size > 0 : gemm_trans_wei; // Transpose RHS

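    // If the GEMM backend keeps its own transformed copy of the weights, the reshaped
    // weights are only read while prepare() runs (lifetime Prepare); otherwise they are
    // read on every run() and must outlive it (Persistent).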
    // Check lifetime
    _aux_mem[Im2ColOutput]    = MemoryInfo(offset_int_vec(Im2ColOutput), MemoryLifetime::Temporary, _im2col_output.total_size());
    _aux_mem[WeightsReshaped] = MemoryInfo(offset_int_vec(WeightsReshaped), gemm_trans_wei ? MemoryLifetime::Prepare : MemoryLifetime::Persistent, _weights_reshaped.total_size());
    _aux_mem[GemmOutput]      = MemoryInfo(offset_int_vec(GemmOutput), MemoryLifetime::Temporary, _gemm_output.total_size());
}

Status CpuGemmConv2d::validate(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *dst, const PadStrideInfo &conv_info,
                               const WeightsInfo &weights_info, const Size2D &dilation, const ActivationLayerInfo &act_info, bool enable_fast_math, unsigned int num_groups)
{
    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src, weights, dst);
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(weights_info.are_reshaped(), "Weights already reshaped are not supported!");
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::BFLOAT16, DataType::F16, DataType::F32);
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(weights, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::QSYMM8_PER_CHANNEL, DataType::BFLOAT16, DataType::F16, DataType::F32);
    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_LAYOUT(src, weights);
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(num_groups > 1, "Grouping (num_groups != 1) is not supported");

    const DataLayout data_layout = src->data_layout();
    const DataType   data_type   = src->data_type();
    const int        idx_width   = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
    const int        idx_height  = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
    const int        idx_channel = get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL);
    const int        idx_kernels = get_data_layout_dimension_index(data_layout, DataLayoutDimension::BATCHES);

    const unsigned int kernel_width  = weights->dimension(idx_width);
    const unsigned int kernel_height = weights->dimension(idx_height);

    TensorInfo         im2col_reshaped_info{};
    TensorInfo         info_gemm{};
    TensorInfo         tmp_info{};
    TensorInfo         weights_reshaped_info{};
    const ITensorInfo *gemm_input_to_use  = src;
    const ITensorInfo *gemm_output_to_use = dst;
    const ITensorInfo *weights_to_use     = weights;

    const bool append_bias  = false;
    const bool is_quantized = is_data_type_quantized_asymmetric(data_type);
    const bool is_bf16      = data_type == DataType::BFLOAT16;

    // Get convolved dimensions
    unsigned int conv_w = 0;
    unsigned int conv_h = 0;

    std::tie(conv_w, conv_h) = scaled_dimensions(src->dimension(idx_width),
                                                 src->dimension(idx_height),
                                                 kernel_width,
                                                 kernel_height,
                                                 conv_info,
                                                 dilation);

    // Check if GEMM3D is supported
    const CpuGemmConv2d::SkipInfo skip_info = CpuGemmConv2d::skip_im_col_info(src, weights, conv_info,
                                                                              dilation, act_info);
    const bool skip_im2col = skip_info.skip_im2col, skip_col2im = skip_info.skip_col2im;

    ARM_COMPUTE_RETURN_ERROR_ON(weights->dimension(idx_channel) != src->dimension(idx_channel));
    ARM_COMPUTE_RETURN_ERROR_ON(weights->num_dimensions() > 4);

    // Validate biases
    if(biases != nullptr)
    {
        if(is_quantized)
        {
            ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(biases, 1, DataType::S32);
        }
        else if(is_bf16)
        {
            ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(biases, 1, DataType::F32);
        }
        else
        {
            ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src, biases);
        }
        ARM_COMPUTE_RETURN_ERROR_ON(biases->dimension(0) != weights->dimension(idx_kernels));
        ARM_COMPUTE_RETURN_ERROR_ON(biases->num_dimensions() > 1);
    }

    unsigned int mat_weights_cols = weights->dimension(idx_kernels);
    unsigned int mat_weights_rows = weights->dimension(idx_width) * weights->dimension(idx_height) * weights->dimension(idx_channel);

    weights_reshaped_info = TensorInfo(compute_weights_reshaped_shape(*weights, append_bias), 1, data_type);
    weights_reshaped_info.set_quantization_info(weights->quantization_info());
    weights_to_use = &weights_reshaped_info;

    if(!skip_im2col)
    {
        // Create tensor info for im2col reshaped inputs
        // For CPU, the batch size is on the fourth dimension
        TensorShape shape_im2col = src->tensor_shape();
        shape_im2col.set(0, mat_weights_rows);
        shape_im2col.set(1, conv_w * conv_h);
        shape_im2col.set(2, 1);
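
        // The im2col matrix holds one unrolled receptive field per row: conv_w * conv_h
        // rows of kernel_w * kernel_h * channels elements, so the GEMM against the
        // reshaped weights produces one output pixel per row.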
        im2col_reshaped_info = TensorInfo(shape_im2col, 1, data_type);
        im2col_reshaped_info.set_quantization_info(src->quantization_info());
        ARM_COMPUTE_RETURN_ON_ERROR(kernels::CpuIm2ColKernel::validate(src, &im2col_reshaped_info, Size2D(kernel_width, kernel_height), conv_info, append_bias, dilation));
        gemm_input_to_use = &im2col_reshaped_info;
    }

    // Create temporary GEMM output tensor in case we cannot skip col2im
    const DataType output_data_type = data_type == DataType::BFLOAT16 ? DataType::F32 : data_type;
    if(!skip_col2im)
    {
        TensorShape shape_gemm = gemm_input_to_use->tensor_shape();
        shape_gemm.set(0, mat_weights_cols);
        shape_gemm.set(1, conv_w * conv_h);
        info_gemm = TensorInfo(shape_gemm, 1, output_data_type);
    }
    else
    {
        info_gemm = TensorInfo(dst->tensor_shape(), 1, output_data_type);
    }
    info_gemm.set_quantization_info(dst->quantization_info()).set_data_layout(src->data_layout());
    gemm_output_to_use = &info_gemm;
    ARM_COMPUTE_RETURN_ON_ERROR(validate_mm(gemm_input_to_use, weights_to_use, biases, gemm_output_to_use, act_info, enable_fast_math, skip_col2im ? conv_h : 0, skip_im2col));

    // Validate Col2Im/ReshapeLayer
    if(!skip_col2im && (data_layout == DataLayout::NCHW))
    {
        ARM_COMPUTE_RETURN_ON_ERROR(kernels::CpuCol2ImKernel::validate(gemm_output_to_use, dst, Size2D(conv_w, conv_h)));
    }

    return Status{};
}

void CpuGemmConv2d::run(ITensorPack &tensors)
{
    prepare(tensors);

    auto src               = tensors.get_const_tensor(ACL_SRC_0);
    auto dst               = tensors.get_tensor(ACL_DST);
    auto gemm_input_to_use = src;

    CpuAuxTensorHandler im2col_output(offset_int_vec(Im2ColOutput), _im2col_output, tensors, false);
    CpuAuxTensorHandler gemm_output(offset_int_vec(GemmOutput), _gemm_output, tensors, false);
    CpuAuxTensorHandler reshaped_wei(offset_int_vec(WeightsReshaped), _weights_reshaped, tensors, false);

    bool out_has_padding = _skip_col2im && (dst->info()->padding().bottom != 0 || dst->info()->padding().top != 0);
    if(!_skip_im2col)
    {
        // Run input reshaping
        unsigned int y_dim = get_data_layout_dimension_index(_data_layout, DataLayoutDimension::HEIGHT);
        ITensorPack  pack =
        {
            { TensorType::ACL_SRC, src },
            { TensorType::ACL_DST, im2col_output.get() }
        };
        NEScheduler::get().schedule_op(_im2col_kernel.get(), y_dim, _im2col_kernel->window(), pack);
        gemm_input_to_use = im2col_output.get();
    }

    // Handle the case where output has top/bottom padding
    const ITensor *out_to_use = out_has_padding ? gemm_output.get() : dst;
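    // The next three lines alias out_to_use's storage as a 3D tensor: soft_init adopts
    // the 3D metadata without allocating, and import_memory points the tensor at the
    // existing buffer, so GEMM3D can write the final layout without an extra copy.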
    Tensor gemm3d;
    _gemm_output_3d.extend_padding(out_to_use->info()->padding());
    gemm3d.allocator()->soft_init(_gemm_output_3d);
    gemm3d.allocator()->import_memory(out_to_use->buffer());
    auto gemm_output_to_use = gemm_output.get();

    if(_skip_im2col)
    {
        gemm_output_to_use = &gemm3d;
    }
    if(_skip_col2im && !out_has_padding)
    {
        gemm_output_to_use = dst;
    }

    // Runs CpuGemm or CpuGemmLowpMatrixMultiplyCore functions
    ITensorPack pack_mm = tensors;
    pack_mm.add_const_tensor(TensorType::ACL_SRC_0, gemm_input_to_use);
    pack_mm.add_const_tensor(TensorType::ACL_SRC_1, reshaped_wei.get());
    pack_mm.add_tensor(TensorType::ACL_DST, gemm_output_to_use);
    if(_is_quantized)
    {
        // Run gemmlowp
        _mm_gemmlowp->run(pack_mm);
    }
    else
    {
        // Run gemm
        _mm_gemm->run(pack_mm);
    }

    // Reshape output matrix
    if(!_skip_col2im)
    {
        if(_data_layout == DataLayout::NCHW)
        {
            ITensorPack pack =
            {
                { TensorType::ACL_SRC, gemm_output.get() },
                { TensorType::ACL_DST, dst }
            };
            NEScheduler::get().schedule_op(_col2im_kernel.get(), Window::DimY, _col2im_kernel->window(), pack);
        }
        else
        {
            ITensorPack pack =
            {
                { TensorType::ACL_SRC, gemm_output_to_use },
                { TensorType::ACL_DST, dst }
            };
            NEScheduler::get().schedule_op(_reshape_kernel.get(), Window::DimY, _reshape_kernel->window(), pack);
        }
    }
    else if(out_has_padding)
    {
        ITensorPack pack =
        {
            { TensorType::ACL_SRC, gemm_output_to_use },
            { TensorType::ACL_DST, dst }
        };
        NEScheduler::get().schedule_op(_reshape_kernel.get(), Window::DimY, _reshape_kernel->window(), pack);
    }
}

void CpuGemmConv2d::prepare(ITensorPack &tensors)
{
    if(!_is_prepared)
    {
        // Run weights reshaping and mark original weights tensor as unused
        CpuAuxTensorHandler weights_reshaped(offset_int_vec(WeightsReshaped), _weights_reshaped, tensors);
        auto                weights = tensors.get_const_tensor(TensorType::ACL_SRC_1);
        ITensorPack         pack =
        {
            { TensorType::ACL_SRC, weights },
            { TensorType::ACL_DST, weights_reshaped.get() }
        };
        NEScheduler::get().schedule_op(_weights_reshape_kernel.get(), 3, _weights_reshape_kernel->window(), pack);
        weights->mark_as_unused();

        // Prepare GEMM
        ITensorPack gemm_pack = tensors;
        gemm_pack.add_const_tensor(TensorType::ACL_SRC_1, weights_reshaped.get());
        _is_quantized ? _mm_gemmlowp->prepare(gemm_pack) : _mm_gemm->prepare(gemm_pack);

        _is_prepared = true;
    }
}

experimental::MemoryRequirements CpuGemmConv2d::workspace() const
{
    return _aux_mem;
}
} // namespace cpu
} // namespace arm_compute
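
The sketch below is an editorial addition, not part of CpuGemmConv2d.cpp: a minimal way to drive the experimental cpu::CpuGemmConv2d operator directly, assuming the 22.05 experimental operator API (ITensorPack slots, and workspace() returning MemoryInfo entries with slot and size fields). The shapes and the run_gemm_conv2d_example function are illustrative; production code would normally go through NEConvolutionLayer, which manages this memory internally.

#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/runtime/Tensor.h"
#include "src/cpu/operators/CpuGemmConv2d.h"

#include <memory>
#include <vector>

using namespace arm_compute;

void run_gemm_conv2d_example()
{
    // Illustrative NHWC FP32 problem: 1x32x32x16 input, eight 3x3x16 kernels, stride 1, pad 1.
    TensorInfo src_info(TensorShape(16U, 32U, 32U, 1U), 1, DataType::F32);
    TensorInfo wei_info(TensorShape(16U, 3U, 3U, 8U), 1, DataType::F32);
    TensorInfo dst_info(TensorShape(8U, 32U, 32U, 1U), 1, DataType::F32);
    src_info.set_data_layout(DataLayout::NHWC);
    wei_info.set_data_layout(DataLayout::NHWC);
    dst_info.set_data_layout(DataLayout::NHWC);
    const PadStrideInfo conv_info(1, 1, 1, 1);

    // Validate on metadata, then configure (no buffers are touched yet).
    cpu::CpuGemmConv2d conv;
    ARM_COMPUTE_ERROR_THROW_ON(cpu::CpuGemmConv2d::validate(&src_info, &wei_info, nullptr, &dst_info, conv_info));
    conv.configure(&src_info, &wei_info, nullptr, &dst_info, conv_info);

    // Back the metadata with real buffers.
    Tensor src, weights, dst;
    src.allocator()->init(src_info);
    weights.allocator()->init(wei_info);
    dst.allocator()->init(dst_info);
    src.allocator()->allocate();
    weights.allocator()->allocate();
    dst.allocator()->allocate();

    ITensorPack pack{ { TensorType::ACL_SRC_0, &src },
                      { TensorType::ACL_SRC_1, &weights },
                      { TensorType::ACL_DST, &dst } };

    // Satisfy the workspace() contract: allocate each requested auxiliary buffer and
    // register it in the pack under the slot id the operator asked for.
    std::vector<std::unique_ptr<Tensor>> aux;
    for(const auto &req : conv.workspace())
    {
        if(req.size == 0)
        {
            continue;
        }
        aux.emplace_back(std::make_unique<Tensor>());
        aux.back()->allocator()->init(TensorInfo(TensorShape(req.size), 1, DataType::U8));
        aux.back()->allocator()->allocate();
        pack.add_tensor(req.slot, aux.back().get());
    }

    conv.run(pack); // run() calls prepare() on first use, reshaping the weights once
}

Buffers whose MemoryInfo lifetime is Prepare are only needed until the first run completes; the sketch keeps everything alive for simplicity.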