Compute Library
 22.11
qasymm8_signed.cpp
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2020-2022 Arm Limited.
3  *
4  * SPDX-License-Identifier: MIT
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a copy
7  * of this software and associated documentation files (the "Software"), to
8  * deal in the Software without restriction, including without limitation the
9  * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10  * sell copies of the Software, and to permit persons to whom the Software is
11  * furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in all
14  * copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22  * SOFTWARE.
23  */
26 #include "arm_compute/core/Types.h"
31 
32 namespace arm_compute
33 {
34 namespace cpu
35 {
/** Elementwise addition of two QASYMM8_SIGNED tensors with Neon acceleration.
 *
 * Re-quantization is folded into a single affine transform in float:
 *   dst_q = a_q * scale1 + b_q * scale2 + offset
 * where scale1/scale2 are the input scales divided by the output scale and
 * offset absorbs all three zero-points.
 *
 * @param[in]  src0   First input tensor (QASYMM8_SIGNED).
 * @param[in]  src1   Second input tensor (QASYMM8_SIGNED). May be broadcast along X.
 * @param[out] dst    Output tensor (QASYMM8_SIGNED).
 * @param[in]  policy Unused: vqmovn_s16/vqmovn_s32 saturate to the int8 range regardless.
 * @param[in]  window Execution window.
 */
void add_qasymm8_signed_neon(const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window)
{
    ARM_COMPUTE_UNUSED(policy);

    // Create input windows; a dimension of extent <= 1 is broadcast against the other input.
    Window input1_win = window.broadcast_if_dimension_le_one(src0->info()->tensor_shape());
    Window input2_win = window.broadcast_if_dimension_le_one(src1->info()->tensor_shape());

    // Clear X dimension on the execution window as we iterate over X manually below.
    Window win = window;
    win.set(Window::DimX, Window::Dimension(0, 1, 1));

    constexpr int window_step_x         = 16; // int8 lanes processed per SIMD iteration
    const auto    window_start_x        = static_cast<int>(window.x().start());
    const auto    window_end_x          = static_cast<int>(window.x().end());
    const bool    is_broadcast_across_x = src0->info()->tensor_shape().x() != src1->info()->tensor_shape().x();

    const UniformQuantizationInfo iq1_info = src0->info()->quantization_info().uniform();
    const UniformQuantizationInfo iq2_info = src1->info()->quantization_info().uniform();
    const UniformQuantizationInfo oq_info  = dst->info()->quantization_info().uniform();

    const auto scale1 = iq1_info.scale / oq_info.scale;
    const auto scale2 = iq2_info.scale / oq_info.scale;
    const auto offset = float(oq_info.offset) - scale1 * float(iq1_info.offset) - scale2 * float(iq2_info.offset);

    if(is_broadcast_across_x)
    {
        const bool     is_broadcast_input_2 = input2_win.x().step() == 0;
        Window         broadcast_win        = is_broadcast_input_2 ? input2_win : input1_win;
        Window         non_broadcast_win    = !is_broadcast_input_2 ? input2_win : input1_win;
        const ITensor *broadcast_tensor     = is_broadcast_input_2 ? src1 : src0;
        const ITensor *non_broadcast_tensor = !is_broadcast_input_2 ? src1 : src0;

        // Scales as seen from the non-broadcast (a) and broadcast (b) operand respectively.
        const auto af_scale = is_broadcast_input_2 ? scale1 : scale2;
        const auto bf_scale = is_broadcast_input_2 ? scale2 : scale1;
        const auto vscale1  = vdupq_n_f32(af_scale);

        // Clear X dimension on the execution window as we iterate over X manually below.
        non_broadcast_win.set(Window::DimX, Window::Dimension(0, 1, 1));

        Iterator broadcast_input(broadcast_tensor, broadcast_win);
        Iterator non_broadcast_input(non_broadcast_tensor, non_broadcast_win);
        Iterator output(dst, win);

        execute_window_loop(win, [&](const Coordinates &)
        {
            const auto non_broadcast_input_ptr = reinterpret_cast<const int8_t *>(non_broadcast_input.ptr());
            const auto output_ptr              = reinterpret_cast<int8_t *>(output.ptr());

            const auto broadcast_value = *reinterpret_cast<const int8_t *>(broadcast_input.ptr());
            // BUGFIX: scale the broadcast operand by bf_scale (its own scale), not scale2.
            // When input 1 is the broadcast tensor, bf_scale == scale1 != scale2, and using
            // scale2 here made the SIMD path disagree with the scalar leftover path below.
            const auto bf  = vdupq_n_f32(float(broadcast_value) * bf_scale + offset);
            const auto bfs = float(broadcast_value) * bf_scale + offset;

            // Compute S elements per iteration
            int x = window_start_x;
            for(; x <= (window_end_x - window_step_x); x += window_step_x)
            {
                const int8x16_t a = vld1q_s8(non_broadcast_input_ptr + x);

                // Widen int8 -> int16 -> int32 -> float32, 4 lanes at a time.
                const auto a_s16_0 = vmovl_s8(vget_low_s8(a));
                const auto a_s16_1 = vmovl_s8(vget_high_s8(a));

                const auto af_0 = vmlaq_f32(bf, vcvtq_f32_s32(vmovl_s16(vget_low_s16(a_s16_0))), vscale1);
                const auto af_1 = vmlaq_f32(bf, vcvtq_f32_s32(vmovl_s16(vget_high_s16(a_s16_0))), vscale1);
                const auto af_2 = vmlaq_f32(bf, vcvtq_f32_s32(vmovl_s16(vget_low_s16(a_s16_1))), vscale1);
                const auto af_3 = vmlaq_f32(bf, vcvtq_f32_s32(vmovl_s16(vget_high_s16(a_s16_1))), vscale1);

                int32x4_t rf_0{};
                int32x4_t rf_1{};
                int32x4_t rf_2{};
                int32x4_t rf_3{};

#ifdef __aarch64__
                // Round-to-nearest-even conversion (matches lround in the scalar tail).
                rf_0 = vcvtnq_s32_f32(af_0);
                rf_1 = vcvtnq_s32_f32(af_1);
                rf_2 = vcvtnq_s32_f32(af_2);
                rf_3 = vcvtnq_s32_f32(af_3);
#else  //__aarch64__
                // AArch32 lacks vcvtnq; truncate instead (matches trunc in the scalar tail).
                rf_0 = vcvtq_s32_f32(af_0);
                rf_1 = vcvtq_s32_f32(af_1);
                rf_2 = vcvtq_s32_f32(af_2);
                rf_3 = vcvtq_s32_f32(af_3);
#endif //__aarch64__

                // Saturating narrow int32 -> int16 -> int8 and store 16 results.
                const int8x8_t pa = vqmovn_s16(vcombine_s16(vqmovn_s32(rf_0), vqmovn_s32(rf_1)));
                const int8x8_t pb = vqmovn_s16(vcombine_s16(vqmovn_s32(rf_2), vqmovn_s32(rf_3)));
                vst1q_s8(output_ptr + x, vcombine_s8(pa, pb));
            }

            // Compute left-over elements
            for(; x < window_end_x; ++x)
            {
                const auto result = float(non_broadcast_input_ptr[x]) * af_scale + bfs;
#ifdef __aarch64__
                output_ptr[x] = utility::clamp<int, int8_t>(support::cpp11::lround(result));
#else  // __aarch64__
                output_ptr[x] = utility::clamp<int, int8_t>(support::cpp11::trunc(result));
#endif // __aarch64__
            }
        },
        broadcast_input, non_broadcast_input, output);
    }
    else
    {
        // Clear X dimension on the execution window as we iterate over X manually below.
        input1_win.set(Window::DimX, Window::Dimension(0, 1, 1));
        input2_win.set(Window::DimX, Window::Dimension(0, 1, 1));

        Iterator input1(src0, input1_win);
        Iterator input2(src1, input2_win);
        Iterator output(dst, win);

        const auto vscale1 = vdupq_n_f32(scale1);
        const auto vscale2 = vdupq_n_f32(scale2);
        const auto voffset = vdupq_n_f32(offset);

        execute_window_loop(win, [&](const Coordinates &)
        {
            const auto input1_ptr = reinterpret_cast<const int8_t *>(input1.ptr());
            const auto input2_ptr = reinterpret_cast<const int8_t *>(input2.ptr());
            const auto output_ptr = reinterpret_cast<int8_t *>(output.ptr());

            // Compute S elements per iteration
            int x = window_start_x;
            for(; x <= (window_end_x - window_step_x); x += window_step_x)
            {
                const int8x16_t a = vld1q_s8(input1_ptr + x);
                const int8x16_t b = vld1q_s8(input2_ptr + x);

                // Widen both operands int8 -> int16.
                const auto a_s16_0 = vmovl_s8(vget_low_s8(a));
                const auto a_s16_1 = vmovl_s8(vget_high_s8(a));
                const auto b_s16_0 = vmovl_s8(vget_low_s8(b));
                const auto b_s16_1 = vmovl_s8(vget_high_s8(b));

                // af = offset + a * scale1, then bf = af + b * scale2.
                const auto af_0 = vmlaq_f32(voffset, vcvtq_f32_s32(vmovl_s16(vget_low_s16(a_s16_0))), vscale1);
                const auto af_1 = vmlaq_f32(voffset, vcvtq_f32_s32(vmovl_s16(vget_high_s16(a_s16_0))), vscale1);
                const auto af_2 = vmlaq_f32(voffset, vcvtq_f32_s32(vmovl_s16(vget_low_s16(a_s16_1))), vscale1);
                const auto af_3 = vmlaq_f32(voffset, vcvtq_f32_s32(vmovl_s16(vget_high_s16(a_s16_1))), vscale1);

                const auto bf_0 = vmlaq_f32(af_0, vcvtq_f32_s32(vmovl_s16(vget_low_s16(b_s16_0))), vscale2);
                const auto bf_1 = vmlaq_f32(af_1, vcvtq_f32_s32(vmovl_s16(vget_high_s16(b_s16_0))), vscale2);
                const auto bf_2 = vmlaq_f32(af_2, vcvtq_f32_s32(vmovl_s16(vget_low_s16(b_s16_1))), vscale2);
                const auto bf_3 = vmlaq_f32(af_3, vcvtq_f32_s32(vmovl_s16(vget_high_s16(b_s16_1))), vscale2);

                int32x4_t rf_0{};
                int32x4_t rf_1{};
                int32x4_t rf_2{};
                int32x4_t rf_3{};

#ifdef __aarch64__
                // Round-to-nearest-even conversion (matches lround in the scalar tail).
                rf_0 = vcvtnq_s32_f32(bf_0);
                rf_1 = vcvtnq_s32_f32(bf_1);
                rf_2 = vcvtnq_s32_f32(bf_2);
                rf_3 = vcvtnq_s32_f32(bf_3);
#else  //__aarch64__
                // AArch32 lacks vcvtnq; truncate instead (matches trunc in the scalar tail).
                rf_0 = vcvtq_s32_f32(bf_0);
                rf_1 = vcvtq_s32_f32(bf_1);
                rf_2 = vcvtq_s32_f32(bf_2);
                rf_3 = vcvtq_s32_f32(bf_3);
#endif //__aarch64__

                // Saturating narrow int32 -> int16 -> int8 and store 16 results.
                const int8x8_t pa = vqmovn_s16(vcombine_s16(vqmovn_s32(rf_0), vqmovn_s32(rf_1)));
                const int8x8_t pb = vqmovn_s16(vcombine_s16(vqmovn_s32(rf_2), vqmovn_s32(rf_3)));
                vst1q_s8(output_ptr + x, vcombine_s8(pa, pb));
            }

            // Compute left-over elements
            for(; x < window_end_x; ++x)
            {
                const auto result = float(input1_ptr[x]) * scale1 + float(input2_ptr[x]) * scale2 + offset;
#ifdef __aarch64__
                output_ptr[x] = utility::clamp<int, int8_t>(support::cpp11::lround(result));
#else  // __aarch64__
                output_ptr[x] = utility::clamp<int, int8_t>(support::cpp11::trunc(result));
#endif // __aarch64__
            }
        },
        input1, input2, output);
    }
}
216 } // namespace cpu
217 } // namespace arm_compute
__global uchar * offset(const Image *img, int x, int y)
Get the pointer position of an Image.
Definition: helpers.h:1084
T trunc(T value)
Truncate floating-point value.
SimpleTensor< float > b
Definition: DFT.cpp:157
constexpr int step() const
Return the step of the dimension.
Definition: Window.h:107
Quantization info when assuming per layer quantization.
Describe one of the image's dimensions with a start, end and step.
Definition: Window.h:79
Interface for CPU tensor.
Definition: ITensor.h:36
Copyright (c) 2017-2022 Arm Limited.
T x() const
Alias to access the size of the first dimension.
Definition: Dimensions.h:87
static constexpr size_t DimX
Alias for dimension 0 also known as X dimension.
Definition: Window.h:43
#define ARM_COMPUTE_UNUSED(...)
To avoid unused variables warnings.
Definition: Error.h:152
virtual const TensorShape & tensor_shape() const =0
Size for each dimension of the tensor.
Coordinates of an item.
Definition: Coordinates.h:37
UniformQuantizationInfo uniform() const
Return per layer quantization info.
virtual ITensorInfo * info() const =0
Interface to be implemented by the child class to return the tensor's metadata.
constexpr uint8_t * ptr() const
Return a pointer to the current pixel.
Definition: Helpers.inl:139
void set(size_t dimension, const Dimension &dim)
Set the values of a given dimension.
Definition: Window.inl:49
Window broadcast_if_dimension_le_one(const TensorShape &shape) const
Don't advance in any dimension where the shape is less than or equal to 1.
Definition: Window.inl:120
virtual QuantizationInfo quantization_info() const =0
Get the quantization settings (scale and offset) of the tensor.
void add_qasymm8_signed_neon(const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window)
void execute_window_loop(const Window &w, L &&lambda_function, Ts &&... iterators)
Iterate through the passed window, automatically adjusting the iterators and calling the lambda_funct...
Definition: Helpers.inl:77
long lround(T value)
Round floating-point value with half value rounding away from zero and cast to long.
std::vector< NodeID > bfs(Graph &g)
Breadth first search traversal.
constexpr int end() const
Return the end of the dimension.
Definition: Window.h:102
Iterator updated by execute_window_loop for each window element.
Definition: Helpers.h:46
constexpr int start() const
Return the start of the dimension.
Definition: Window.h:97
Describe a multidimensional execution window.
Definition: Window.h:39
ConvertPolicy
Policy to handle integer overflow.
Definition: Types.h:404
constexpr const Dimension & x() const
Alias to access the first dimension of the window.
Definition: Window.h:159