Compute Library
 22.05
CpuDirectConv2dOutputStageKernel.cpp
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2017-2021 Arm Limited.
3  *
4  * SPDX-License-Identifier: MIT
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a copy
7  * of this software and associated documentation files (the "Software"), to
8  * deal in the Software without restriction, including without limitation the
9  * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10  * sell copies of the Software, and to permit persons to whom the Software is
11  * furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in all
14  * copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22  * SOFTWARE.
23  */
25 
26 #include "arm_compute/core/Error.h"
29 #include "arm_compute/core/Types.h"
33 #include "src/core/CPP/Validate.h"
34 #include "src/core/NEON/NEAsymm.h"
39 
40 #include <arm_neon.h>
41 #include <cstddef>
42 #include <cstdint>
43 
44 namespace arm_compute
45 {
46 namespace cpu
47 {
48 namespace kernels
49 {
50 namespace
51 {
/** Validate the tensor-info combination for the direct-conv output stage.
 *
 * @param src  Accumulator tensor info (S32 for quantized, F16/F32 for float paths).
 * @param bias (Optional) 1D bias tensor info; may be nullptr.
 * @param dst  (Optional) destination tensor info; nullptr means in-place (float only).
 * @param info Output-stage descriptor (quantization parameters, requested output type).
 *
 * @return A Status holding an error if any check fails.
 */
Status validate_arguments(const ITensorInfo *src, const ITensorInfo *bias, const ITensorInfo *dst,
                          const DirectConvolutionLayerOutputStageKernelInfo &info)
{
    // Dispatch later relies on NCHW vs NHWC, so the layout must be known.
    ARM_COMPUTE_RETURN_ERROR_ON(src->data_layout() == DataLayout::UNKNOWN);

    if(bias != nullptr)
    {
        // Bias must be a 1D vector with one element per output channel.
        ARM_COMPUTE_RETURN_ERROR_ON(bias->dimension(0) != src->dimension(get_data_layout_dimension_index(src->data_layout(), DataLayoutDimension::CHANNEL)));
        ARM_COMPUTE_RETURN_ERROR_ON(bias->num_dimensions() > 1);
    }

    if(src->data_type() == DataType::S32)
    {
        // Requantizing S32 -> QASYMM8[_SIGNED] narrows the element size, so it cannot be done in place.
        ARM_COMPUTE_RETURN_ERROR_ON_MSG(dst == nullptr, "In-place computation not allowed for quantized output");
    }

    // Checks performed when output is configured
    if((dst != nullptr) && (dst->total_size() != 0))
    {
        if(is_data_type_float(src->data_type()))
        {
            // NOTE(review): the float-path data-type check appears to have been lost in
            // extraction (upstream validates matching src/dst data types here) — TODO confirm.
        }
        else
        {
            // NOTE(review): the quantized-path dst data-type check likewise appears elided — TODO confirm.
        }
    }
    else if(src->data_type() == DataType::S32)
    {
        // In case of quantized computation and unconfigured output, the output data type must be provided through DirectConvolutionLayerOutputStageKernelInfo
        ARM_COMPUTE_RETURN_ERROR_ON((info.output_data_type != DataType::QASYMM8) && (info.output_data_type != DataType::QASYMM8_SIGNED));
    }

    return Status{};
}
93 
/** Float output stage for NCHW: optionally adds the per-channel bias and copies to @p dst.
 *
 * The fixed-point parameters are ignored here; they only apply to the quantized overloads.
 */
template <typename T>
typename std::enable_if<arm_compute::utils::traits::is_floating_point<T>::value, void>::type
output_stage_nchw(ITensor *src, const ITensor *bias, const Window &window, ITensor *dst,
                  int result_fixedpoint_multiplier, int result_shift, int result_offset_after_shift)
{
    /** SIMD vector tag type. */
    using ExactTagType = typename wrapper::traits::neon_bitvector_tag_t<T, wrapper::traits::BitWidth::W128>;

    ARM_COMPUTE_ERROR_ON(src->info()->data_layout() == DataLayout::UNKNOWN);
    // Quantization parameters are meaningless on the float path.
    ARM_COMPUTE_UNUSED(result_fixedpoint_multiplier);
    ARM_COMPUTE_UNUSED(result_shift);
    ARM_COMPUTE_UNUSED(result_offset_after_shift);

    const bool accumulate_bias = (bias != nullptr);

    const int x_begin = window.x().start();
    const int x_end   = window.x().end();
    const int x_step  = 16 / src->info()->element_size(); // lanes per 128-bit vector

    // Collapse X so the lambda handles the x-loop manually (vector body + scalar tail).
    Window collapsed(window);
    collapsed.set(Window::DimX, Window::Dimension(0, 1, 1));

    Iterator input_it(src, collapsed);
    Iterator output_it(dst, collapsed);

    execute_window_loop(collapsed, [&](const Coordinates & id)
    {
        const auto input_base  = reinterpret_cast<const T *>(input_it.ptr());
        const auto output_base = reinterpret_cast<T *>(output_it.ptr());

        int x = x_begin;

        // Vectorised body: x_step elements per iteration.
        while(x <= (x_end - x_step))
        {
            auto acc = wrapper::vloadq(input_base + x);
            if(accumulate_bias)
            {
                // In NCHW the channel is the Z coordinate; one bias value per channel.
                const T channel_bias = *reinterpret_cast<const T *>(bias->ptr_to_element(Coordinates(id.z())));
                acc = wrapper::vadd(acc, wrapper::vdup_n(channel_bias, ExactTagType{}));
            }
            wrapper::vstore(output_base + x, acc);
            x += x_step;
        }

        // Scalar tail for the leftover elements.
        while(x < x_end)
        {
            T value = input_base[x];
            if(accumulate_bias)
            {
                value += *reinterpret_cast<const T *>(bias->ptr_to_element(Coordinates(id.z())));
            }
            output_base[x] = value;
            ++x;
        }
    },
    input_it, output_it);
}
155 
/** Float output stage for NHWC: optionally adds the bias vector and copies to @p dst.
 *
 * The fixed-point parameters are ignored here; they only apply to the quantized overloads.
 */
template <typename T>
typename std::enable_if<arm_compute::utils::traits::is_floating_point<T>::value, void>::type
output_stage_nhwc(ITensor *src, const ITensor *bias, const Window &window, ITensor *dst,
                  int result_fixedpoint_multiplier, int result_shift, int result_offset_after_shift)
{
    // Quantization parameters are meaningless on the float path.
    ARM_COMPUTE_UNUSED(result_fixedpoint_multiplier);
    ARM_COMPUTE_UNUSED(result_shift);
    ARM_COMPUTE_UNUSED(result_offset_after_shift);

    const bool accumulate_bias = (bias != nullptr);

    // The bias is addressed along X only (the channel dimension in NHWC): pin all other dims.
    Window window_bias = window;
    window_bias.set(Window::DimX, Window::Dimension(0, 1, 1));
    window_bias.set(Window::DimY, Window::Dimension(0, 0, 0));
    window_bias.set(Window::DimZ, Window::Dimension(0, 0, 0));
    window_bias.set(3, Window::Dimension(0, 0, 0));

    const int x_begin = window.x().start();
    const int x_end   = window.x().end();
    const int x_step  = 16 / src->info()->element_size(); // lanes per 128-bit vector

    // Collapse X so the lambda handles the x-loop manually (vector body + scalar tail).
    Window collapsed(window);
    collapsed.set(Window::DimX, Window::Dimension(0, 1, 1));

    Iterator input_it(src, collapsed);
    Iterator bias_it(bias, window_bias);
    Iterator output_it(dst, collapsed);

    execute_window_loop(collapsed, [&](const Coordinates &)
    {
        const auto input_base  = reinterpret_cast<const T *>(input_it.ptr());
        const auto output_base = reinterpret_cast<T *>(output_it.ptr());

        int x = x_begin;

        // Vectorised body: x_step elements per iteration.
        for(; x <= (x_end - x_step); x += x_step)
        {
            auto acc = wrapper::vloadq(input_base + x);
            if(accumulate_bias)
            {
                acc = wrapper::vadd(acc, wrapper::vloadq(reinterpret_cast<T *>(bias_it.ptr()) + x));
            }
            wrapper::vstore(output_base + x, acc);
        }

        // Scalar tail for the leftover elements.
        for(; x < x_end; ++x)
        {
            T value = input_base[x];
            if(accumulate_bias)
            {
                value += *(reinterpret_cast<T *>(bias_it.ptr()) + x);
            }
            output_base[x] = value;
        }
    },
    input_it, bias_it, output_it);
}
221 
// Quantized case: requantize S32 accumulators to QASYMM8 (TOut = uint8_t) or
// QASYMM8_SIGNED (TOut = int8_t), optionally adding a per-channel S32 bias first.
template < typename TOut, typename std::enable_if < std::is_same<TOut, uint8_t>::value || std::is_same<TOut, int8_t>::value, int >::type = 0 >
void output_stage_nchw(ITensor *src, const ITensor *bias, const Window &window, ITensor *dst,
                       int result_fixedpoint_multiplier, int result_shift, int result_offset_after_shift)
{
    const bool has_bias = bias != nullptr;
    // 128-bit NEON vector/tag types for the narrowed output element type.
    using VectorType = typename wrapper::traits::neon_bitvector_t<TOut, wrapper::traits::BitWidth::W128>;
    using TagType    = typename wrapper::traits::neon_bitvector_tag_t<TOut, wrapper::traits::BitWidth::W128>;

    const int32x4_t result_offset_after_shift_s32 = vdupq_n_s32(result_offset_after_shift);

    // Saturation bounds of the output type, broadcast across a full vector.
    const VectorType min = wrapper::vdup_n(std::numeric_limits<TOut>::lowest(), TagType{});
    const VectorType max = wrapper::vdup_n(std::numeric_limits<TOut>::max(), TagType{});

    const int window_start_x = window.x().start();
    const int window_end_x   = window.x().end();
    // NOTE(review): element_size() of the S32 source is 4, so window_step_x is 4, yet the
    // vector body below consumes 16 elements per iteration. Overlapping iterations store
    // identical values, but the last vector iterations read/write past window_end_x —
    // presumably absorbed by tensor padding; TODO confirm against upstream.
    const int window_step_x = 16 / src->info()->element_size();
    Window    win           = window;
    win.set(Window::DimX, Window::Dimension(0, 1, 1));

    Iterator in(src, win);
    Iterator out(dst, win);

    execute_window_loop(win, [&](const Coordinates & id)
    {

        int x = window_start_x;
        for(; x <= (window_end_x - window_step_x); x += window_step_x)
        {
            // Load 16 S32 accumulators as four int32x4 lanes.
            const auto in_ptr = reinterpret_cast<int32_t *>(in.ptr()) + x;
            int32x4x4_t v_in =
            {
                {
                    wrapper::vloadq(in_ptr),
                    wrapper::vloadq(in_ptr + 4),
                    wrapper::vloadq(in_ptr + 8),
                    wrapper::vloadq(in_ptr + 12)
                }
            };

            // Accumulate bias: in NCHW the channel is the Z coordinate, one bias value per channel.
            if(has_bias)
            {
                const auto vb = wrapper::vdup_n(*reinterpret_cast<const int32_t *>(bias->ptr_to_element(Coordinates(id.z()))), TagType{});
                v_in =
                {
                    {
                        wrapper::vadd(v_in.val[0], vb),
                        wrapper::vadd(v_in.val[1], vb),
                        wrapper::vadd(v_in.val[2], vb),
                        wrapper::vadd(v_in.val[3], vb)
                    }
                };
            }

            // Fixed-point multiply + shift + offset, then saturating narrow to TOut.
            const auto out_ptr = reinterpret_cast<TOut *>(out.ptr()) + x;
            wrapper::vstore(out_ptr, finalize_quantization(v_in, result_fixedpoint_multiplier, result_shift, result_offset_after_shift_s32,
                                                           min, max, false));
        }

        // Left-overs loop: scalar requantization for the remaining elements.
        for(; x < window_end_x; ++x)
        {
            // Get bias and pointer to input
            int32_t s_in = *(reinterpret_cast<const int32_t *>(in.ptr()) + x);

            // Accumulate bias
            if(has_bias)
            {
                const auto b = *reinterpret_cast<const int32_t *>(bias->ptr_to_element(Coordinates(id.z())));
                s_in += b;
            }

            const auto out_ptr = reinterpret_cast<TOut *>(out.ptr()) + x;
            *out_ptr = finalize_quantization(s_in, result_fixedpoint_multiplier, result_shift, result_offset_after_shift,
                                             std::numeric_limits<TOut>::lowest(), std::numeric_limits<TOut>::max(), false);
        }
    },
    in, out);
}
// Quantized case (NHWC): requantize S32 accumulators to QASYMM8 (TOut = uint8_t) or
// QASYMM8_SIGNED (TOut = int8_t), optionally adding the S32 bias vector first.
template < typename TOut, typename std::enable_if < std::is_same<TOut, uint8_t>::value || std::is_same<TOut, int8_t>::value, int >::type = 0 >
void output_stage_nhwc(ITensor *src, const ITensor *bias, const Window &window, ITensor *dst,
                       int result_fixedpoint_multiplier, int result_shift, int result_offset_after_shift)
{
    const bool has_bias = bias != nullptr;
    // 128-bit NEON vector/tag types for the narrowed output element type.
    using VectorType = typename wrapper::traits::neon_bitvector_t<TOut, wrapper::traits::BitWidth::W128>;
    using TagType    = typename wrapper::traits::neon_bitvector_tag_t<TOut, wrapper::traits::BitWidth::W128>;

    const int32x4_t result_offset_after_shift_s32 = vdupq_n_s32(result_offset_after_shift);

    // Saturation bounds of the output type, broadcast across a full vector.
    const VectorType min = wrapper::vdup_n(std::numeric_limits<TOut>::lowest(), TagType{});
    const VectorType max = wrapper::vdup_n(std::numeric_limits<TOut>::max(), TagType{});

    // The bias is addressed along X only (the channel dimension in NHWC): pin all other dims.
    Window window_bias = window;
    window_bias.set(Window::DimX, Window::Dimension(0, 1, 1));
    window_bias.set(Window::DimY, Window::Dimension(0, 0, 0));
    window_bias.set(Window::DimZ, Window::Dimension(0, 0, 0));
    window_bias.set(3, Window::Dimension(0, 0, 0));

    const int window_start_x = window.x().start();
    const int window_end_x   = window.x().end();
    // NOTE(review): element_size() of the S32 source is 4, so window_step_x is 4, yet the
    // vector body below consumes 16 elements per iteration — see the NCHW quantized variant.
    const int window_step_x = 16 / src->info()->element_size();
    Window    win           = window;
    win.set(Window::DimX, Window::Dimension(0, 1, 1));

    Iterator in(src, win);
    Iterator bi(bias, window_bias);
    Iterator out(dst, win);

    execute_window_loop(win, [&](const Coordinates &)
    {
        int x = window_start_x;
        for(; x <= (window_end_x - window_step_x); x += window_step_x)
        {
            // Load 16 S32 accumulators as four int32x4 lanes.
            const auto in_ptr = reinterpret_cast<int32_t *>(in.ptr()) + x;
            int32x4x4_t v_in =
            {
                {
                    wrapper::vloadq(in_ptr),
                    wrapper::vloadq(in_ptr + 4),
                    wrapper::vloadq(in_ptr + 8),
                    wrapper::vloadq(in_ptr + 12),
                }
            };

            // Accumulate bias
            if(has_bias)
            {
                const auto bias_ptr = reinterpret_cast<const int32_t *>(bi.ptr()) + x;

                // BUGFIX: wrapper::vadd returns its result by value; previously the four
                // results were discarded, so the bias was never applied on the vectorized
                // path (the scalar left-overs loop below did apply it). Assign the sums
                // back into v_in, mirroring the NCHW quantized variant.
                v_in =
                {
                    {
                        wrapper::vadd(v_in.val[0], wrapper::vloadq(bias_ptr)),
                        wrapper::vadd(v_in.val[1], wrapper::vloadq(bias_ptr + 4)),
                        wrapper::vadd(v_in.val[2], wrapper::vloadq(bias_ptr + 8)),
                        wrapper::vadd(v_in.val[3], wrapper::vloadq(bias_ptr + 12))
                    }
                };
            }

            // Fixed-point multiply + shift + offset, then saturating narrow to TOut.
            const auto out_ptr = reinterpret_cast<TOut *>(out.ptr()) + x;
            wrapper::vstore(out_ptr, finalize_quantization(v_in, result_fixedpoint_multiplier, result_shift, result_offset_after_shift_s32, min, max, false));
        }

        // Left-overs loop: scalar requantization for the remaining elements.
        for(; x < window_end_x; ++x)
        {
            // Get bias and pointer to input
            const auto in_ptr = reinterpret_cast<int32_t *>(in.ptr()) + x;
            int32_t    s_in   = *in_ptr;

            // Accumulate bias
            if(has_bias)
            {
                const auto bias_ptr = reinterpret_cast<const int32_t *>(bi.ptr()) + x;
                s_in += *bias_ptr;
            }

            const auto out_ptr = reinterpret_cast<TOut *>(out.ptr()) + x;
            *out_ptr = finalize_quantization(s_in, result_fixedpoint_multiplier, result_shift, result_offset_after_shift,
                                             std::numeric_limits<TOut>::lowest(), std::numeric_limits<TOut>::max(), false);
        }
    },
    in, bi, out);
}
385 } // namespace
386 
{
    // NOTE(review): the signature line (void CpuDirectConv2dOutputStageKernel::configure(...))
    // was dropped by the docs extractor; the body below is preserved as-is.
    ARM_COMPUTE_UNUSED(bias);
    // Perform validation step
    ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(src, bias, dst, info));

    // Cache the requantization parameters; they are forwarded to the selected kernel in run_op().
    _func                         = nullptr;
    _result_fixedpoint_multiplier = info.result_fixedpoint_multiplier;
    _result_shift                 = info.result_shift;
    _result_offset_after_shift    = info.result_offset_after_shift;

    // Auto-initialize output if required
    if(dst != nullptr)
    {
        // Work out expected output data type: an S32 accumulator is requantized to the type
        // requested via the kernel info; otherwise S32 is used.
        // NOTE(review): S32 as the fallback for float src looks surprising — only relevant
        // when dst is empty, but worth confirming against upstream.
        const DataType output_dt = (src->data_type() == DataType::S32) ? info.output_data_type : DataType::S32;
        // Output tensor auto initialization if not yet initialized
        auto_init_if_empty(*dst, src->clone()->set_data_type(output_dt));
    }

    // One element per step; each kernel performs its own vectorized x-loop internally.
    Window win = calculate_max_window(*src, Steps());

    ICpuKernel::configure(win);

    const bool is_qasymm8_signed = (dst != nullptr) ? is_data_type_quantized_asymmetric_signed(dst->data_type()) : false;

    // Set appropriate function: dispatch on data layout, source data type and signedness
    // of the quantized output.
    if(src->data_layout() == DataLayout::NCHW)
    {
        switch(src->data_type())
        {
            case DataType::S32:
            {
                if(is_qasymm8_signed)
                {
                    _func = &output_stage_nchw<int8_t>;
                }
                else
                {
                    _func = &output_stage_nchw<uint8_t>;
                }
                break;
            }
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
            case DataType::F16:
            {
                _func = &output_stage_nchw<float16_t>;
                break;
            }
#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
            case DataType::F32:
            {
                _func = &output_stage_nchw<float>;
                break;
            }
            default:
            {
                ARM_COMPUTE_ERROR("Unsupported combination of types among the inputs.");
            }
        }
    }
    else
    {
        switch(src->data_type())
        {
            case DataType::S32:
            {
                if(is_qasymm8_signed)
                {
                    _func = &output_stage_nhwc<int8_t>;
                }
                else
                {
                    _func = &output_stage_nhwc<uint8_t>;
                }
                break;
            }
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
            case DataType::F16:
            {
                _func = &output_stage_nhwc<float16_t>;
                break;
            }
#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
            case DataType::F32:
            {
                _func = &output_stage_nhwc<float>;
                break;
            }
            default:
            {
                ARM_COMPUTE_ERROR("Unsupported combination of types among the inputs.");
            }
        }
    }
}
485 
{
    // NOTE(review): the signature line (Status CpuDirectConv2dOutputStageKernel::validate(...))
    // was dropped by the docs extractor.
    // Thin static wrapper: delegates all checks to the anonymous-namespace helper.
    ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(src, bias, dst, info));
    return Status{};
}
492 
493 void CpuDirectConv2dOutputStageKernel::run_op(ITensorPack &tensors, const Window &window, const ThreadInfo &info)
494 {
495  ARM_COMPUTE_UNUSED(info);
498  ARM_COMPUTE_ERROR_ON(_func == nullptr);
499 
500  auto src = tensors.get_tensor(TensorType::ACL_SRC_0);
501  auto bias = tensors.get_const_tensor(TensorType::ACL_SRC_1);
502  auto dst = tensors.get_tensor(TensorType::ACL_DST);
503 
504  (*_func)(src, bias, window, dst, _result_fixedpoint_multiplier, _result_shift, _result_offset_after_shift);
505 }
506 
{
    // NOTE(review): the signature line (const char *CpuDirectConv2dOutputStageKernel::get_name() const)
    // was dropped by the docs extractor. Returns the kernel's identifying name.
    return "CpuDirectConv2dOutputStageKernel";
}
511 } // namespace kernels
512 } // namespace cpu
513 } // namespace arm_compute
int32_t result_fixedpoint_multiplier
Result output stage multiplier used for quantizing.
Window calculate_max_window(const ValidRegion &valid_region, const Steps &steps, bool skip_border, BorderSize border_size)
const Window & window() const
The maximum window the kernel can be executed on.
Definition: IKernel.cpp:28
#define ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(tensor)
Definition: Validate.h:115
int32_t result_offset_after_shift
Result offset used for quantizing.
SimpleTensor< float > b
Definition: DFT.cpp:157
#define ARM_COMPUTE_ERROR(msg)
Print the given message then throw an std::runtime_error.
Definition: Error.h:352
uint8x16_t vloadq(const uint8_t *ptr)
Definition: load.h:58
#define ARM_COMPUTE_RETURN_ON_ERROR(status)
Checks if a status contains an error and returns it.
Definition: Error.h:204
virtual DataType data_type() const =0
Data type used for each element of the tensor.
uint8x8_t vadd(const uint8x8_t &a, const uint8x8_t &b)
Definition: add.h:39
1 channel, 1 F32 per channel
void run_op(ITensorPack &tensors, const Window &window, const ThreadInfo &info) override
Execute the kernel on the passed window.
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
Definition: Error.h:466
Store the tensor's metadata.
Definition: ITensorInfo.h:40
#define ARM_COMPUTE_ERROR_THROW_ON(status)
Definition: Error.h:455
Status class.
Definition: Error.h:52
Status validate_arguments(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *dst, const PadStrideInfo &conv_info)
#define ARM_COMPUTE_RETURN_ERROR_ON(cond)
If the condition is true, an error is returned.
Definition: Error.h:296
decltype(strategy::transforms) typedef type
static Status validate(const ITensorInfo *src, const ITensorInfo *bias=nullptr, const ITensorInfo *dst=nullptr, const DirectConvolutionLayerOutputStageKernelInfo &info=DirectConvolutionLayerOutputStageKernelInfo())
Static function to check if given info will lead to a valid configuration.
SimpleTensor< float > src
Definition: DFT.cpp:155
Copyright (c) 2017-2022 Arm Limited.
void configure(ITensorInfo *src, const ITensorInfo *bias=nullptr, ITensorInfo *dst=nullptr, const DirectConvolutionLayerOutputStageKernelInfo &info=DirectConvolutionLayerOutputStageKernelInfo())
Set the accumulate buffer and the biases of the kernel.
1 channel, 1 F16 per channel
#define ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(...)
Definition: Validate.h:159
1 channel, 1 S32 per channel
const ITensor * get_const_tensor(int id) const
Get constant tensor of a given id.
Definition: ITensorPack.cpp:54
static constexpr size_t DimX
Alias for dimension 0 also known as X dimension.
Definition: Window.h:43
#define ARM_COMPUTE_UNUSED(...)
To avoid unused variables warnings.
Definition: Error.h:152
quantized, asymmetric fixed-point 8-bit number unsigned
Class to describe a number of elements in each dimension.
Definition: Steps.h:40
bool is_data_type_quantized_asymmetric_signed(DataType dt)
Check if a given data type is of asymmetric quantized signed type.
Definition: Utils.h:1071
bool auto_init_if_empty(ITensorInfo &info, const TensorShape &shape, int num_channels, DataType data_type, QuantizationInfo quantization_info=QuantizationInfo())
Auto initialize the tensor info (shape, number of channels and data type) if the current assignment i...
virtual std::unique_ptr< T > clone() const =0
Provide a clone of the current object of class T.
#define ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(k)
Definition: Validate.h:915
Num samples, channels, height, width.
static constexpr size_t DimY
Alias for dimension 1 also known as Y dimension.
Definition: Window.h:45
ScaleKernelInfo info(interpolation_policy, default_border_mode, PixelValue(), sampling_policy, false)
ITensor * get_tensor(int id)
Get tensor of a given id from the pack.
Definition: ITensorPack.cpp:64
uint8x16_t finalize_quantization(int32x4x4_t &in_s32, int result_fixedpoint_multiplier, int32_t result_shift, int32x4_t result_offset_after_shift_s32, uint8x16_t min_u8, uint8x16_t max_u8, bool is_bounded_relu)
Performs final quantization step on 16 elements.
Definition: NEAsymm.h:81
Information about executing thread and CPU.
Definition: CPPTypes.h:169
#define ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(...)
Definition: Validate.h:439
static constexpr size_t DimZ
Alias for dimension 2 also known as Z dimension.
Definition: Window.h:47
size_t get_data_layout_dimension_index(const DataLayout &data_layout, const DataLayoutDimension &data_layout_dimension)
Get the index of the given dimension.
Definition: Helpers.inl:193
DataType output_data_type
Output tensor data type to use if the output is not initialized.
#define ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(...)
Definition: Validate.h:541
#define ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(t, c,...)
Definition: Validate.h:788
void vstore(uint8_t *ptr, uint8x8_t val)
Definition: store.h:39
#define ARM_COMPUTE_RETURN_ERROR_ON_MSG(cond, msg)
If the condition is true, an error is returned.
Definition: Error.h:244
Tensor packing service.
Definition: ITensorPack.h:39
#define ARM_COMPUTE_ERROR_ON_NULLPTR(...)
Definition: Validate.h:157
uint8x8_t vdup_n(uint8_t value, traits::vector_64_tag)
Definition: dup_n.h:41
void execute_window_loop(const Window &w, L &&lambda_function, Ts &&... iterators)
Iterate through the passed window, automatically adjusting the iterators and calling the lambda_funct...
Definition: Helpers.inl:77
quantized, asymmetric fixed-point 8-bit number signed
Includes all wrapper headers at once.
DataType
Available data types.
Definition: Types.h:79
Describe a multidimensional execution window.
Definition: Window.h:39
bool is_data_type_float(DataType dt)
Check if a given data type is of floating point type.
Definition: Utils.h:1010
Descriptor used by the direct convolution layer output stage kernels.
#define ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(f, s)
Definition: Validate.h:201
virtual DataLayout data_layout() const =0
Get the data layout of the tensor.
int32_t result_shift
Result output stage shift used for quantizing.
const int32_t * bias