Arm Compute Library 22.11 — CpuIm2ColKernel.cpp
(Source listing extracted from the generated file documentation.)
1 /*
2  * Copyright (c) 2017-2022 Arm Limited.
3  *
4  * SPDX-License-Identifier: MIT
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a copy
7  * of this software and associated documentation files (the "Software"), to
8  * deal in the Software without restriction, including without limitation the
9  * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10  * sell copies of the Software, and to permit persons to whom the Software is
11  * furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in all
14  * copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22  * SOFTWARE.
23  */
25 
26 #include "arm_compute/core/Error.h"
31 #include "arm_compute/core/Types.h"
33 #include "src/core/CPP/Validate.h"
36 
38 
39 #include <arm_neon.h>
40 #include <cstddef>
41 #include <cstdint>
42 #include <cstring>
43 #include <tuple>
44 
45 namespace arm_compute
46 {
47 using namespace misc::shape_calculator;
48 namespace cpu
49 {
50 namespace kernels
51 {
52 namespace
53 {
54 Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, const Size2D &kernel_dims, const PadStrideInfo &conv_info,
55  bool has_bias, const Size2D &dilation, unsigned int num_groups)
56 {
60  ARM_COMPUTE_RETURN_ERROR_ON(is_data_type_quantized(input->data_type()) && has_bias);
61  ARM_COMPUTE_RETURN_ERROR_ON((dilation.x() < 1) || (dilation.y() < 1));
62  ARM_COMPUTE_RETURN_ERROR_ON_MSG(num_groups > 1, "Number of groups greater than one are not supported on Neon");
63 
64  // Since there's no implicit padding added, check the total input spatial dimensions (with conv paddings) are big enough for the kernel dimensions
65  const unsigned int width_idx = get_data_layout_dimension_index(input->data_layout(), DataLayoutDimension::WIDTH);
66  const unsigned int height_idx = get_data_layout_dimension_index(input->data_layout(), DataLayoutDimension::HEIGHT);
67  const unsigned total_width = input->dimension(width_idx) + conv_info.pad_left() + conv_info.pad_right();
68  const unsigned total_height = input->dimension(height_idx) + conv_info.pad_top() + conv_info.pad_bottom();
69  ARM_COMPUTE_RETURN_ERROR_ON((total_width < kernel_dims.width) || (total_height < kernel_dims.height));
70 
71  if(output->total_size() > 0)
72  {
73  TensorInfo expected_output = output->clone()->set_tensor_shape(compute_im2col_conv_shape(input, kernel_dims, conv_info, has_bias, dilation, false));
74  ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(&expected_output, output);
77  }
78 
79  return Status{};
80 }
81 
/** Linearize one convolution window of an NCHW input volume into one im2col output row.
 *
 * Copies the kernel_width x kernel_height patch at spatial top-left
 * (top_left_x, top_left_y) for every one of the kernel_depth input channels
 * into out_ptr, one kernel-sized plane per channel. When has_pads is true,
 * out-of-bounds coordinates are written as pad_value instead of being read
 * from the input. If has_bias is true, a trailing 1 is appended so a GEMM
 * can fold the bias into the matrix product.
 *
 * in_ptr points at the start of the (x=0, y=0, d=0) element; strides are in bytes.
 */
template <typename T, bool has_pads>
inline void linearize_volume_nchw(const uint8_t *const in_ptr,
                                  T *out_ptr,
                                  bool has_bias,
                                  int top_left_x,
                                  int top_left_y,
                                  int kernel_width,
                                  int kernel_height,
                                  int kernel_depth,
                                  int input_w,
                                  int input_h,
                                  int input_stride_x,
                                  int input_stride_y,
                                  int input_stride_z,
                                  int pad_value,
                                  int dilation_x,
                                  int dilation_y)
{
    // Number of output elements produced per input channel (one kernel plane).
    const int kernel_size2 = kernel_width * kernel_height;
    // One-past-the-end coordinates of the (dilated) sampling window.
    const int x_e = top_left_x + kernel_width * dilation_x;
    const int y_e = top_left_y + kernel_height * dilation_y;

    // Linearize volume
    int d = 0;
    // This for loop linearize a volume with 3 slices. This allows:
    // 1) to reduce the iterations of the outer for loop "d"
    // 2) to have an optimized im2col for the first convolution layer where usually we have 3 IFMs
    for(; d <= (kernel_depth - 3); d += 3)
    {
        for(int y = top_left_y; y < y_e; y += dilation_y)
        {
            if((y < 0 || y >= input_h) && has_pads)
            {
                // All the values will be the offset (will be zeros when not quantized)
                for(int x = top_left_x; x < x_e; x += dilation_x, ++out_ptr)
                {
                    // The three unrolled channels land kernel_size2 elements apart in the output row.
                    *(out_ptr + 0 * kernel_size2) = pad_value;
                    *(out_ptr + 1 * kernel_size2) = pad_value;
                    *(out_ptr + 2 * kernel_size2) = pad_value;
                }
            }
            else
            {
                for(int x = top_left_x; x < x_e; x += dilation_x, ++out_ptr)
                {
                    if((x < 0 || x >= input_w) && has_pads)
                    {
                        *(out_ptr + 0 * kernel_size2) = pad_value;
                        *(out_ptr + 1 * kernel_size2) = pad_value;
                        *(out_ptr + 2 * kernel_size2) = pad_value;
                    }
                    else
                    {
                        // Byte-stride addressing: element (d, y, x) of the NCHW input.
                        *(out_ptr + 0 * kernel_size2) = *(reinterpret_cast<const T *>(in_ptr + ((d + 0) * input_stride_z + y * input_stride_y + x * input_stride_x)));
                        *(out_ptr + 1 * kernel_size2) = *(reinterpret_cast<const T *>(in_ptr + ((d + 1) * input_stride_z + y * input_stride_y + x * input_stride_x)));
                        *(out_ptr + 2 * kernel_size2) = *(reinterpret_cast<const T *>(in_ptr + ((d + 2) * input_stride_z + y * input_stride_y + x * input_stride_x)));
                    }
                }
            }
        }
        // out_ptr advanced by one kernel plane inside the loops; skip past the
        // two extra planes that were filled via the +1/+2 * kernel_size2 offsets.
        out_ptr += 2 * kernel_size2;
    }

    // Left over
    for(; d < kernel_depth; d++)
    {
        for(int y = top_left_y; y < y_e; y += dilation_y)
        {
            if((y < 0 || y >= input_h) && has_pads)
            {
                // All the values will be the offset (will be zeros when not quantized)
                // NOTE: memset is byte-wise; this is correct because pad_value is 0 for
                // multi-byte types and T is a single byte for the quantized types — see caller.
                memset(static_cast<void *>(out_ptr), pad_value, kernel_width * sizeof(T));
                out_ptr += kernel_width;
            }
            else
            {
                for(int x = top_left_x; x < x_e; x += dilation_x, ++out_ptr)
                {
                    if((x < 0 || x >= input_w) && has_pads)
                    {
                        *out_ptr = pad_value;
                    }
                    else
                    {
                        *out_ptr = *(reinterpret_cast<const T *>(in_ptr + (d * input_stride_z + y * input_stride_y + x * input_stride_x)));
                    }
                }
            }
        }
    }

    // Append 1 if the convolution layer has biases
    if(has_bias)
    {
        *out_ptr = static_cast<T>(1);
    }
}
179 
/** Linearize one convolution window of an NHWC input volume into one im2col output row.
 *
 * For each of the kernel_height (dilated) rows, copies kernel_width pixels of
 * input_c channels each into out_ptr. Out-of-bounds rows/pixels are filled with
 * pad_value. If has_bias is true, a trailing 1 is appended for the bias term.
 *
 * in_ptr points at the start of the (c=0, x=0, y=0) element; strides are in bytes.
 */
template <typename T, bool has_pads>
inline void linearize_volume_nhwc(const uint8_t *const in_ptr,
                                  T *out_ptr,
                                  bool has_bias,
                                  int start_x,
                                  int start_y,
                                  int kernel_width,
                                  int kernel_height,
                                  int input_w,
                                  int input_h,
                                  int input_c,
                                  int input_stride_y,
                                  int input_stride_z,
                                  int pad_value,
                                  int dilation_x,
                                  int dilation_y)
{
    // One-past-the-end coordinates of the (dilated) sampling window.
    const int end_x        = start_x + kernel_width * dilation_x;
    const int end_y        = start_y + kernel_height * dilation_y;
    // Number of output elements written for a fully-padded row.
    const int pad_quant    = kernel_width * input_c;
    const int element_size = static_cast<int>(sizeof(T));
    // Fast path: whole window in-bounds, no x-dilation, and channels are densely
    // packed (row stride == input_c * element_size) — copy each row in one memcpy.
    if((start_y >= 0) && (end_y < input_h) && (start_x >= 0) && (end_x < input_w) && (dilation_x == 1) && (input_stride_y == input_c * element_size))
    {
        for(int y = start_y; y < end_y; y += dilation_y)
        {
            //optimized for no dilation and no boundary pixels
            memcpy(out_ptr, reinterpret_cast<const T *>(in_ptr + (y * input_stride_z + start_x * input_stride_y)), input_c * kernel_width * element_size);
            out_ptr += input_c * kernel_width;
        }
    }
    else
    {
        for(int y = start_y; y < end_y; y += dilation_y)
        {
            if(y < 0 || y >= input_h)
            {
                // Entire row is out of bounds: pad the whole kernel_width x input_c span.
                // NOTE: memset is byte-wise; correct because pad_value is 0 for multi-byte
                // types and T is a single byte for the quantized types — see caller.
                memset(static_cast<void *>(out_ptr), pad_value, pad_quant * element_size);
                out_ptr += pad_quant;
            }
            else if(dilation_x > 1 || start_x < 0 || end_x >= input_w || input_stride_y != input_c * element_size)
            {
                // General path: handle per-pixel bounds checks and x-dilation.
                for(int x = start_x; x < end_x; x += dilation_x)
                {
                    if(x < 0 || x >= input_w)
                    {
                        memset(static_cast<void *>(out_ptr), pad_value, input_c * element_size);
                        out_ptr += input_c;
                    }
                    else
                    {
                        memcpy(out_ptr, reinterpret_cast<const T *>(in_ptr + (y * input_stride_z + x * input_stride_y)), input_c * element_size);
                        out_ptr += input_c;
                    }
                }
            }
            else
            {
                //optimized for no dilation and no boundary pixels
                memcpy(out_ptr, reinterpret_cast<const T *>(in_ptr + (y * input_stride_z + start_x * input_stride_y)), input_c * kernel_width * element_size);
                out_ptr += input_c * kernel_width;
            }
        }
    }
    // Append 1 if the convolution layer has biases
    if(has_bias)
    {
        *out_ptr = static_cast<T>(1);
    }
}
249 } // namespace
250 
251 template <typename T, bool has_pads, bool is_nchw>
252 void CpuIm2ColKernel::run_im2col(const ITensor *src, ITensor *dst, const Window &window)
253 {
255  ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICpuKernel::window(), window);
256 
257  const unsigned int width_idx = get_data_layout_dimension_index(_data_layout, DataLayoutDimension::WIDTH);
258  const unsigned int height_idx = get_data_layout_dimension_index(_data_layout, DataLayoutDimension::HEIGHT);
259  const unsigned int channel_idx = get_data_layout_dimension_index(_data_layout, DataLayoutDimension::CHANNEL);
260 
261  const int input_w = src->info()->dimension(width_idx);
262  const int input_h = src->info()->dimension(height_idx);
263  const int input_c = src->info()->dimension(channel_idx);
264  const int input_stride_x = src->info()->strides_in_bytes().x();
265  const int input_stride_y = src->info()->strides_in_bytes().y();
266  const int input_stride_z = src->info()->strides_in_bytes().z();
267  const int pad_left = _conv_info.pad_left();
268  const int pad_top = _conv_info.pad_top();
269  const int stride_x = _conv_info.stride().first;
270  const int stride_y = _conv_info.stride().second;
271  const int pad_value = is_data_type_quantized(src->info()->data_type()) ? src->info()->quantization_info().uniform().offset : 0;
272 
273  Window window_in_out(window);
274  // The first three dimensions of the input and output are increased by the inner loops
275  window_in_out.set(Window::DimX, Window::Dimension(0, 0, 0));
276  window_in_out.set(Window::DimY, Window::Dimension(0, 0, 0));
277  window_in_out.set(Window::DimZ, Window::Dimension(0, 0, 0));
278 
279  // Create iterators
280  Iterator in(src, window_in_out);
281  Iterator out(dst, window_in_out);
282 
283  execute_window_loop(window, [&](const Coordinates & id)
284  {
285  const int start_w = id[width_idx] * stride_x - pad_left;
286  const int start_h = id[height_idx] * stride_y - pad_top;
287 
288  // Get pointers
289  const uint8_t *const input_ptr = in.ptr();
290  auto output_ptr = reinterpret_cast<T *>(out.ptr() + (id[width_idx] + id[height_idx] * _convolved_dims.first) * dst->info()->strides_in_bytes().y());
291 
292  // Linearize volume
293  if(is_nchw)
294  {
295  linearize_volume_nchw<T, has_pads>(input_ptr,
296  output_ptr,
297  _has_bias,
298  start_w,
299  start_h,
300  _kernel_width,
301  _kernel_height,
302  input_c,
303  input_w,
304  input_h,
305  input_stride_x,
308  pad_value,
309  _dilation.x(),
310  _dilation.y());
311  }
312  else
313  {
314  linearize_volume_nhwc<T, has_pads>(input_ptr,
315  output_ptr,
316  _has_bias,
317  start_w,
318  start_h,
319  _kernel_width,
320  _kernel_height,
321  input_w,
322  input_h,
323  input_c,
326  pad_value,
327  _dilation.x(),
328  _dilation.y());
329  }
330  },
331  in, out);
332 }
333 
334 void CpuIm2ColKernel::configure(const ITensorInfo *src, ITensorInfo *dst, const Size2D &kernel_dims, const PadStrideInfo &conv_info,
335  bool has_bias, const Size2D &dilation, unsigned int num_groups)
336 {
338  ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(src, dst, kernel_dims, conv_info, has_bias, dilation, num_groups));
339  ARM_COMPUTE_UNUSED(num_groups);
340 
341  _data_layout = src->data_layout();
342  const unsigned int width_idx = get_data_layout_dimension_index(_data_layout, DataLayoutDimension::WIDTH);
343  const unsigned int height_idx = get_data_layout_dimension_index(_data_layout, DataLayoutDimension::HEIGHT);
344  const unsigned int channel_idx = get_data_layout_dimension_index(_data_layout, DataLayoutDimension::CHANNEL);
345 
346  _conv_info = conv_info;
347  _kernel_width = kernel_dims.width;
348  _kernel_height = kernel_dims.height;
349  _dilation = dilation;
350  _convolved_dims = scaled_dimensions(src->dimension(width_idx), dst->dimension(height_idx),
351  _kernel_width, _kernel_height,
352  _conv_info, _dilation);
353  _has_bias = has_bias;
354 
355  if(_data_layout == DataLayout::NCHW)
356  {
357  switch(src->data_type())
358  {
359  case DataType::F32:
360  _func = (!conv_info.has_padding()) ? &CpuIm2ColKernel::run_im2col<float, false, true> : &CpuIm2ColKernel::run_im2col<float, true, true>;
361  break;
362 #if defined(ARM_COMPUTE_ENABLE_BF16)
363  case DataType::BFLOAT16:
364  _func = (!conv_info.has_padding()) ? &CpuIm2ColKernel::run_im2col<bfloat16, false, true> : &CpuIm2ColKernel::run_im2col<bfloat16, true, true>;
365  break;
366 #endif /* defined(ARM_COMPUTE_ENABLE_BF16) */
367 #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
368  case DataType::F16:
369  _func = (!conv_info.has_padding()) ? &CpuIm2ColKernel::run_im2col<float16_t, false, true> : &CpuIm2ColKernel::run_im2col<float16_t, true, true>;
370  break;
371 #endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
373  case DataType::QASYMM8:
374  _func = (!conv_info.has_padding()) ? &CpuIm2ColKernel::run_im2col<qasymm8_t, false, true> : &CpuIm2ColKernel::run_im2col<qasymm8_t, true, true>;
375  break;
376  default:
377  ARM_COMPUTE_ERROR("Data type not supported");
378  break;
379  }
380  }
381  else
382  {
383  switch(src->data_type())
384  {
385  case DataType::F32:
386  _func = (!conv_info.has_padding()) ? &CpuIm2ColKernel::run_im2col<float, false, false> : &CpuIm2ColKernel::run_im2col<float, true, false>;
387  break;
388 #if defined(ARM_COMPUTE_ENABLE_BF16)
389  case DataType::BFLOAT16:
390  _func = (!conv_info.has_padding()) ? &CpuIm2ColKernel::run_im2col<bfloat16, false, false> : &CpuIm2ColKernel::run_im2col<bfloat16, true, false>;
391  break;
392 #endif /* defined(ARM_COMPUTE_ENABLE_BF16) */
393 #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
394  case DataType::F16:
395  _func = (!conv_info.has_padding()) ? &CpuIm2ColKernel::run_im2col<float16_t, false, false> : &CpuIm2ColKernel::run_im2col<float16_t, true, false>;
396  break;
397 #endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
398  case DataType::QASYMM8:
399  _func = (!conv_info.has_padding()) ? &CpuIm2ColKernel::run_im2col<uint8_t, false, false> : &CpuIm2ColKernel::run_im2col<qasymm8_t, true, false>;
400  break;
402  _func = (!conv_info.has_padding()) ? &CpuIm2ColKernel::run_im2col<int8_t, false, false> : &CpuIm2ColKernel::run_im2col<qasymm8_t, true, false>;
403  break;
404  default:
405  ARM_COMPUTE_ERROR("Data type not supported");
406  break;
407  }
408  }
409 
410  // Output tensor auto initialization if not yet initialized
411  auto_init_if_empty(*dst, src->clone()->set_tensor_shape(compute_im2col_conv_shape(src, kernel_dims, conv_info, has_bias, dilation, false)));
412 
413  std::pair<unsigned int, unsigned int> convolved_dims = scaled_dimensions(src->dimension(width_idx), src->dimension(height_idx),
414  kernel_dims.width, kernel_dims.height,
415  conv_info, dilation);
416 
417  Window win = calculate_max_window(*src, Steps());
418  win.set(width_idx, Window::Dimension(0, convolved_dims.first, 1));
419  win.set(height_idx, Window::Dimension(0, convolved_dims.second, 1));
420  win.set(channel_idx, Window::Dimension(0, 1, 1));
421  // Configure kernel window
423 }
424 
425 Status CpuIm2ColKernel::validate(const ITensorInfo *src, const ITensorInfo *dst, const Size2D &kernel_dims, const PadStrideInfo &conv_info,
426  bool has_bias, const Size2D &dilation, unsigned int num_groups)
427 {
428  ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(src, dst, kernel_dims, conv_info, has_bias, dilation, num_groups));
429  return Status{};
430 }
431 
432 void CpuIm2ColKernel::run_op(ITensorPack &tensors, const Window &window, const ThreadInfo &info)
433 {
434  ARM_COMPUTE_UNUSED(info);
436  ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICpuKernel::window(), window);
437 
438  auto src = tensors.get_const_tensor(TensorType::ACL_SRC);
439  auto dst = tensors.get_tensor(TensorType::ACL_DST);
440  (this->*_func)(src, dst, window);
441 }
442 const char *CpuIm2ColKernel::name() const
443 {
444  return "CpuIm2ColKernel";
445 }
446 
447 size_t CpuIm2ColKernel::get_mws(const CPUInfo &platform, size_t thread_count) const
448 {
449  ARM_COMPUTE_UNUSED(thread_count);
450  ARM_COMPUTE_UNUSED(platform);
451 
453 }
454 } // namespace kernels
455 } // namespace cpu
456 } // namespace arm_compute
bool is_data_type_quantized(DataType dt)
Check if a given data type is of quantized type.
Definition: Utils.h:1030
Window calculate_max_window(const ValidRegion &valid_region, const Steps &steps, bool skip_border, BorderSize border_size)
Status validate(const OperatorGraph &op_graph)
Return the validity of op_graph, usually after performing an operation (e.g.
#define ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(tensor)
Definition: Validate.h:115
virtual size_t dimension(size_t index) const =0
Return the size of the requested dimension.
#define ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_QUANTIZATION_INFO(...)
Definition: Validate.h:606
#define ARM_COMPUTE_ERROR(msg)
Print the given message then throw an std::runtime_error.
Definition: Error.h:352
#define ARM_COMPUTE_RETURN_ON_ERROR(status)
Checks if a status contains an error and returns it.
Definition: Error.h:204
virtual DataType data_type() const =0
Data type used for each element of the tensor.
1 channel, 1 F32 per channel
Store the tensor's metadata.
Definition: ITensorInfo.h:40
#define ARM_COMPUTE_ERROR_THROW_ON(status)
Definition: Error.h:455
Describe one of the image's dimensions with a start, end and step.
Definition: Window.h:79
Status class.
Definition: Error.h:52
SimpleTensor< uint8_t > expected_output(output_shape, DataType::QASYMM8, 1, qasymm)
Status validate_arguments(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *dst, const PadStrideInfo &conv_info)
#define ARM_COMPUTE_RETURN_ERROR_ON(cond)
If the condition is true, an error is returned.
Definition: Error.h:296
SimpleTensor< float > src
Definition: DFT.cpp:155
Copyright (c) 2017-2022 Arm Limited.
size_t height
Height of the image region or rectangle.
Definition: Size2D.h:91
1 channel, 1 F16 per channel
std::pair< unsigned int, unsigned int > scaled_dimensions(int width, int height, int kernel_width, int kernel_height, const PadStrideInfo &pad_stride_info, const Size2D &dilation=Size2D(1U, 1U))
Returns expected width and height of output scaled tensor depending on dimensions rounding mode...
Definition: Utils.cpp:429
#define ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(...)
Definition: Validate.h:159
16-bit brain floating-point number
const ITensor * get_const_tensor(int id) const
Get constant tensor of a given id.
Definition: ITensorPack.cpp:54
static constexpr size_t DimX
Alias for dimension 0 also known as X dimension.
Definition: Window.h:43
#define ARM_COMPUTE_UNUSED(...)
To avoid unused variables warnings.
Definition: Error.h:152
quantized, asymmetric fixed-point 8-bit number unsigned
Class to describe a number of elements in each dimension.
Definition: Steps.h:40
const unsigned int num_groups
Definition: Im2Col.cpp:153
bool auto_init_if_empty(ITensorInfo &info, const TensorShape &shape, int num_channels, DataType data_type, QuantizationInfo quantization_info=QuantizationInfo())
Auto initialize the tensor info (shape, number of channels and data type) if the current assignment i...
virtual std::unique_ptr< T > clone() const =0
Provide a clone of the current object of class T.
Padding and stride information class.
Definition: Types.h:669
const char * name
#define ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(k)
Definition: Validate.h:915
Num samples, channels, height, width.
static constexpr size_t DimY
Alias for dimension 1 also known as Y dimension.
Definition: Window.h:45
ScaleKernelInfo info(interpolation_policy, default_border_mode, PixelValue(), sampling_policy, false)
ITensor * get_tensor(int id)
Get tensor of a given id from the pac.
Definition: ITensorPack.cpp:64
Information about executing thread and CPU.
Definition: CPPTypes.h:179
#define ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(...)
Definition: Validate.h:439
size_t width
Width of the image region or rectangle.
Definition: Size2D.h:90
static constexpr size_t DimZ
Alias for dimension 2 also known as Z dimension.
Definition: Window.h:47
const size_t input_stride_y
Definition: impl.cpp:50
size_t get_data_layout_dimension_index(const DataLayout &data_layout, const DataLayoutDimension &data_layout_dimension)
Get the index of the given dimension.
Definition: Helpers.inl:193
Class for specifying the size of an image or rectangle.
Definition: Size2D.h:34
#define ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(...)
Definition: Validate.h:541
#define ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(t, c,...)
Definition: Validate.h:788
#define ARM_COMPUTE_RETURN_ERROR_ON_MSG(cond, msg)
If the condition is true, an error is returned.
Definition: Error.h:244
Tensor packing service.
Definition: ITensorPack.h:39
#define ARM_COMPUTE_ERROR_ON_NULLPTR(...)
Definition: Validate.h:157
void execute_window_loop(const Window &w, L &&lambda_function, Ts &&... iterators)
Iterate through the passed window, automatically adjusting the iterators and calling the lambda_funct...
Definition: Helpers.inl:77
quantized, asymmetric fixed-point 8-bit number signed
im2col_func configure(src_target.info(), dst_target.info(), spatial_kernel, conv_info, has_bias)
TensorShape compute_im2col_conv_shape(const ITensorInfo *input, const Size2D &kernel_dims, const PadStrideInfo &conv_info, bool has_bias, const Size2D &dilation, bool batch_size_on_z, unsigned int num_groups=1)
Calculate the im2col output shape of a tensor.
Describe a multidimensional execution window.
Definition: Window.h:39
#define ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(f, s)
Definition: Validate.h:201
const size_t input_stride_z
Definition: impl.cpp:51
static constexpr size_t default_mws
Definition: ICPPKernel.h:41
virtual DataLayout data_layout() const =0
Get the data layout of the tensor.
bool has_padding() const
Check whether this has any padding.
Definition: Types.h:770