Compute Library
 21.02
CpuConcatenateWidthKernel.cpp
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2018-2021 Arm Limited.
3  *
4  * SPDX-License-Identifier: MIT
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a copy
7  * of this software and associated documentation files (the "Software"), to
8  * deal in the Software without restriction, including without limitation the
9  * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10  * sell copies of the Software, and to permit persons to whom the Software is
11  * furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in all
14  * copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22  * SOFTWARE.
23  */
25 
26 #include "arm_compute/core/Error.h"
31 #include "arm_compute/core/Utils.h"
34 #include "src/core/NEON/NEAsymm.h"
38 
39 #include <cstdint>
40 
41 namespace arm_compute
42 {
43 namespace cpu
44 {
45 namespace kernels
46 {
47 namespace
48 {
49 Status validate_arguments(const ITensorInfo *src, unsigned int width_offset, const ITensorInfo *dst)
50 {
52  // Note: ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(src) is not needed here as this kernel doesn't use Neon FP16 instructions.
55  ARM_COMPUTE_RETURN_ERROR_ON(src->dimension(0) + width_offset > dst->dimension(0));
56 
57  for(size_t i = 1; i < Coordinates::num_max_dimensions; ++i)
58  {
59  ARM_COMPUTE_RETURN_ERROR_ON(src->dimension(i) != dst->dimension(i));
60  }
61 
62  return Status{};
63 }
64 } // namespace
65 
// Default state: no horizontal offset until configure() is called.
 : _width_offset(0)
{
}
70 
/** Configure the kernel to copy @p src into @p dst starting at X-offset @p width_offset.
 *
 * @param[in]     src          Source tensor info.
 * @param[in]     width_offset X-offset (in elements) inside @p dst where @p src is placed.
 * @param[in,out] dst          Destination tensor info (valid region is updated here).
 */
void CpuConcatenateWidthKernel::configure(const ITensorInfo *src, unsigned int width_offset, ITensorInfo *dst)
{
    ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(src, width_offset, dst));

    _width_offset = width_offset;

    // Configure kernel window: iterate over the whole of src, one element per step.
    Window win = calculate_max_window(*src, Steps());
    // Mark the entire dst shape as valid (the concat layer fills it slice by slice).
    Coordinates coord;
    coord.set_num_dimensions(dst->num_dimensions());
    dst->set_valid_region(ValidRegion(coord, dst->tensor_shape()));

    ICpuKernel::configure(win);
}
86 
87 Status CpuConcatenateWidthKernel::validate(const ITensorInfo *src, unsigned int width_offset, const ITensorInfo *dst)
88 {
89  ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(src, width_offset, dst));
90  return Status{};
91 }
92 
// run_op body: copy the ACL_SRC tensor into ACL_DST at X-offset _width_offset.
// Re-quantizes element-wise when src/dst are QASYMM8(_SIGNED) with differing
// quantization info; otherwise performs a raw byte copy of each row.
{
    ARM_COMPUTE_UNUSED(info);

    const auto src = tensors.get_const_tensor(TensorType::ACL_SRC);
    auto       dst = tensors.get_tensor(TensorType::ACL_DST);

    // Offset output pointer to the correct position
    uint8_t *dst_ptr = dst->buffer() + dst->info()->offset_first_element_in_bytes() + _width_offset * dst->info()->strides_in_bytes()[0];

    // The inner loops walk each row in *bytes*, hence the end is scaled by the
    // element size. NOTE(review): the start is left unscaled — presumably the
    // execution window always starts at 0 on X; confirm against the scheduler.
    const auto window_start_x = static_cast<int>(window.x().start());
    const auto window_end_x = static_cast<int>(window.x().end()) * static_cast<int>(dst->info()->element_size());
    constexpr int window_step_x = 16; // one 128-bit Neon register = 16 bytes per step

    // Collapse X so each lambda invocation processes one full row.
    Window win{ window };
    win.set(Window::DimX, Window::Dimension(0, 1, 1));

    // Create iterators
    Iterator src_it(src, win);
    Iterator dst_it(dst, win);
    const DataType dt = src->info()->data_type();
    const UniformQuantizationInfo &src_qinfo = src->info()->quantization_info().uniform();
    const UniformQuantizationInfo &dst_qinfo = dst->info()->quantization_info().uniform();
    if(dt == DataType::QASYMM8 && src_qinfo != dst_qinfo)
    {
        // Unsigned 8-bit asymmetric with mismatched quantization:
        // dequantize with src_qinfo, requantize with dst_qinfo, 16 lanes at a time.
        execute_window_loop(win, [&](const Coordinates &)
        {
            int x = window_start_x;
            for(; x <= (window_end_x - window_step_x); x += window_step_x)
            {
                vst1q_u8(dst_ptr + dst_it.offset() + x, vquantize(vdequantize(vld1q_u8(src_it.ptr() + x), src_qinfo), dst_qinfo));
            }

            // Compute left-over elements
            for(; x < window_end_x; ++x)
            {
                *(dst_ptr + dst_it.offset() + x) = quantize_qasymm8(dequantize_qasymm8(*(src_it.ptr() + x), src_qinfo), dst_qinfo);
            }
        },
        src_it, dst_it);
    }
    else if(dt == DataType::QASYMM8_SIGNED && src_qinfo != dst_qinfo)
    {
        // Signed 8-bit asymmetric variant of the requantizing path above.
        execute_window_loop(win, [&](const Coordinates &)
        {
            int x = window_start_x;
            for(; x <= (window_end_x - window_step_x); x += window_step_x)
            {
                vst1q_s8(reinterpret_cast<int8_t *>(dst_ptr + dst_it.offset() + x),
                         vquantize_signed(vdequantize(vld1q_s8(reinterpret_cast<int8_t *>(src_it.ptr() + x)), src_qinfo), dst_qinfo));
            }

            // Compute left-over elements
            for(; x < window_end_x; ++x)
            {
                *(dst_ptr + dst_it.offset() + x) = quantize_qasymm8_signed(dequantize_qasymm8_signed(*(src_it.ptr() + x), src_qinfo), dst_qinfo);
            }
        },
        src_it, dst_it);
    }
    else
    {
        // Same quantization (or non-quantized type): plain byte-wise copy,
        // vectorized 16 bytes at a time with a scalar tail.
        execute_window_loop(win, [&](const Coordinates &)
        {
            const auto in_ptr  = src_it.ptr();
            const auto out_ptr = dst_ptr + dst_it.offset();
            int        x       = window_start_x;
            for(; x <= (window_end_x - window_step_x); x += window_step_x)
            {
                wrapper::vstore(out_ptr + x, wrapper::vloadq(in_ptr + x));
            }

            // Compute left-over elements
            for(; x < window_end_x; ++x)
            {
                *(out_ptr + x) = *(in_ptr + x);
            }
        },
        src_it, dst_it);
    }
}
176 
// name() body: identifier reported to the scheduler/profiling layers.
{
    return "CpuConcatenateWidthKernel";
}
181 } // namespace kernels
182 } // namespace cpu
183 } // namespace arm_compute
virtual size_t num_dimensions() const =0
The number of dimensions of the tensor (rank)
Window calculate_max_window(const ValidRegion &valid_region, const Steps &steps, bool skip_border, BorderSize border_size)
const Window & window() const
The maximum window the kernel can be executed on.
Definition: IKernel.cpp:28
float dequantize_qasymm8(uint8_t value, const INFO_TYPE &qinfo)
Dequantize a value given an unsigned 8-bit asymmetric quantization scheme.
uint8_t quantize_qasymm8(float value, const INFO_TYPE &qinfo, RoundingPolicy rounding_policy=RoundingPolicy::TO_NEAREST_UP)
Quantize a value given an unsigned 8-bit asymmetric quantization scheme.
float32x4x2_t vdequantize(const uint8x8_t &qv, const UniformQuantizationInfo &qi)
Dequantize a neon vector holding 8 quantized values.
Definition: NEAsymm.h:415
uint8x16_t vloadq(const uint8_t *ptr)
Definition: load.h:58
#define ARM_COMPUTE_RETURN_ON_ERROR(status)
Checks if a status contains an error and returns it.
Definition: Error.h:204
Store the tensor&#39;s metadata.
Definition: ITensorInfo.h:40
#define ARM_COMPUTE_ERROR_THROW_ON(status)
Definition: Error.h:455
Quantization info when assuming per layer quantization.
Describe one of the image&#39;s dimensions with a start, end and step.
Definition: Window.h:77
Status class.
Definition: Error.h:52
void configure(const ITensorInfo *src, unsigned int width_offset, ITensorInfo *dst)
Configure kernel for a given list of arguments.
#define ARM_COMPUTE_RETURN_ERROR_ON(cond)
If the condition is true, an error is returned.
Definition: Error.h:296
SimpleTensor< float > src
Definition: DFT.cpp:155
Copyright (c) 2017-2021 Arm Limited.
virtual void set_valid_region(const ValidRegion &valid_region)=0
Set the valid region of the tensor.
DataType dt
#define ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(...)
Definition: Validate.h:163
static Status validate(const ITensorInfo *src, unsigned int width_offset, const ITensorInfo *dst)
Static function to check if given info will lead to a valid configuration of CpuConcatenateWidthKerne...
const ITensor * get_const_tensor(int id) const
Get constant tensor of a given id.
Definition: ITensorPack.cpp:40
static constexpr size_t DimX
Alias for dimension 0 also known as X dimension.
Definition: Window.h:43
#define ARM_COMPUTE_UNUSED(...)
To avoid unused variables warnings.
Definition: Error.h:152
virtual const TensorShape & tensor_shape() const =0
Size for each dimension of the tensor.
int8_t quantize_qasymm8_signed(float value, const INFO_TYPE &qinfo, RoundingPolicy rounding_policy=RoundingPolicy::TO_NEAREST_UP)
Quantize a value given a signed 8-bit asymmetric quantization scheme.
quantized, asymmetric fixed-point 8-bit number unsigned
Class to describe a number of elements in each dimension.
Definition: Steps.h:40
Coordinates of an item.
Definition: Coordinates.h:37
virtual uint8_t * buffer() const =0
Interface to be implemented by the child class to return a pointer to CPU memory. ...
constexpr uint8_t * ptr() const
Return a pointer to the current pixel.
Definition: Helpers.inl:139
void set(size_t dimension, const Dimension &dim)
Set the values of a given dimension.
Definition: Window.inl:49
#define ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(k)
Definition: Validate.h:941
ScaleKernelInfo info(interpolation_policy, default_border_mode, PixelValue(), sampling_policy, false)
const char * name() const override
Name of the kernel.
ITensor * get_tensor(int id)
Get tensor of a given id from the pac.
Definition: ITensorPack.cpp:50
Information about executing thread and CPU.
Definition: CPPTypes.h:235
void run_op(ITensorPack &tensors, const Window &window, const ThreadInfo &info) override
Execute the kernel on the passed window.
#define ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(...)
Definition: Validate.h:545
Status validate_arguments(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output, const GEMMLowpOutputStageInfo *output_stage)
uint8x8_t vquantize(const float32x4x2_t &qv, const UniformQuantizationInfo &qi)
Quantize a neon vector holding 8 floating point values.
Definition: NEAsymm.h:602
void vstore(uint8_t *ptr, uint8x8_t val)
Definition: store.h:39
float dequantize_qasymm8_signed(int8_t value, const INFO_TYPE &qinfo)
Dequantize a value given a signed 8-bit asymmetric quantization scheme.
Tensor packing service.
Definition: ITensorPack.h:37
#define ARM_COMPUTE_ERROR_ON_NULLPTR(...)
Definition: Validate.h:161
void execute_window_loop(const Window &w, L &&lambda_function, Ts &&... iterators)
Iterate through the passed window, automatically adjusting the iterators and calling the lambda_funct...
Definition: Helpers.inl:77
void set_num_dimensions(size_t num_dimensions)
Set number of dimensions.
Definition: Dimensions.h:149
quantized, asymmetric fixed-point 8-bit number signed
Includes all wrapper headers at once.
int8x8_t vquantize_signed(const float32x4x2_t &qv, const UniformQuantizationInfo &qi)
Quantize a neon vector holding 8 floating point values.
Definition: NEAsymm.h:630
constexpr size_t offset() const
Return the offset in bytes from the first element to the current position of the iterator.
Definition: Helpers.inl:134
Container for valid region of a window.
Definition: Types.h:188
constexpr int end() const
Return the end of the dimension.
Definition: Window.h:99
static constexpr size_t num_max_dimensions
Number of dimensions the tensor has.
Definition: Dimensions.h:46
Iterator updated by execute_window_loop for each window element.
Definition: Helpers.h:46
DataType
Available data types.
Definition: Types.h:77
constexpr int start() const
Return the start of the dimension.
Definition: Window.h:94
Describe a multidimensional execution window.
Definition: Window.h:39
#define ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(f, s)
Definition: Validate.h:205
constexpr const Dimension & x() const
Alias to access the first dimension of the window.
Definition: Window.h:145