Compute Library
 22.11
qasymm8_signed.cpp
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2020-2022 Arm Limited.
3  *
4  * SPDX-License-Identifier: MIT
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a copy
7  * of this software and associated documentation files (the "Software"), to
8  * deal in the Software without restriction, including without limitation the
9  * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10  * sell copies of the Software, and to permit persons to whom the Software is
11  * furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in all
14  * copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22  * SOFTWARE.
23  */
24 
27 #include "arm_compute/core/Types.h"
29 #include "src/core/NEON/SVEMath.h"
31 #include <arm_sve.h>
32 
namespace arm_compute
{
namespace cpu
{
/** SVE2 element-wise addition of two QASYMM8_SIGNED (asymmetric-quantized int8) tensors.
 *
 * Each int8 value is dequantized to float32 as (q - offset) * scale, the two
 * dequantized operands are added, and the sum is requantized to the output's
 * quantization parameters as sum / out_scale + out_offset, with saturating
 * narrowing back to int8.
 *
 * @param[in]  src0   First input tensor (QASYMM8_SIGNED).
 * @param[in]  src1   Second input tensor (QASYMM8_SIGNED). May broadcast along X.
 * @param[out] dst    Output tensor (QASYMM8_SIGNED).
 * @param[in]  policy Unused: the saturating narrow intrinsics (svqxtnb/svqxtnt)
 *                    always saturate, so WRAP and SATURATE behave identically here.
 * @param[in]  window Execution window describing the region to process.
 */
void add_qasymm8_signed_sve2(const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window)
{
    ARM_COMPUTE_UNUSED(policy);

    // Create input windows; broadcast_if_dimension_le_one collapses dimensions
    // of size <= 1 so one input can be broadcast against the other.
    Window input1_win = window.broadcast_if_dimension_le_one(src0->info()->tensor_shape());
    Window input2_win = window.broadcast_if_dimension_le_one(src1->info()->tensor_shape());

    // Clear X Dimension on execution window as we handle manually
    Window win = window;
    win.set(Window::DimX, Window::Dimension(0, 1, 1));

    const auto window_start_x        = static_cast<int>(window.x().start());
    const auto window_end_x          = static_cast<int>(window.x().end());
    // Broadcast along X means one input contributes a single scalar per row.
    const bool is_broadcast_across_x = src0->info()->tensor_shape().x() != src1->info()->tensor_shape().x();

    // Per-tensor (uniform) quantization parameters for both inputs and the output.
    const UniformQuantizationInfo iq1_info = src0->info()->quantization_info().uniform();
    const UniformQuantizationInfo iq2_info = src1->info()->quantization_info().uniform();
    const UniformQuantizationInfo oq_info  = dst->info()->quantization_info().uniform();

    // Requantization constants: result_q = sum_f * (1/out_scale) + out_offset.
    const auto invvscaleo = svdup_n_f32(1.f / oq_info.scale);
    const auto voffseto   = svdup_n_f32(oq_info.offset);

    if(is_broadcast_across_x)
    {
        // A step of 0 in X identifies the broadcast (scalar-per-row) input.
        const bool     is_broadcast_input_2 = input2_win.x().step() == 0;
        Window         broadcast_win        = is_broadcast_input_2 ? input2_win : input1_win;
        Window         non_broadcast_win    = !is_broadcast_input_2 ? input2_win : input1_win;
        const ITensor *broadcast_tensor     = is_broadcast_input_2 ? src1 : src0;
        const ITensor *non_broadcast_tensor = !is_broadcast_input_2 ? src1 : src0;
        const auto     all_true_pg          = svptrue_b8();

        // scale1/offset1 always belong to the non-broadcast input,
        // scale2/offset2 to the broadcast input, regardless of which of
        // src0/src1 is being broadcast.
        const auto vscale1  = is_broadcast_input_2 ? svdup_n_f32(iq1_info.scale) : svdup_n_f32(iq2_info.scale);
        const auto vscale2  = is_broadcast_input_2 ? svdup_n_f32(iq2_info.scale) : svdup_n_f32(iq1_info.scale);
        const auto voffset1 = is_broadcast_input_2 ? svdup_n_s32(iq1_info.offset) : svdup_n_s32(iq2_info.offset);
        const auto voffset2 = is_broadcast_input_2 ? svdup_n_s32(iq2_info.offset) : svdup_n_s32(iq1_info.offset);

        // Clear X Dimension on execution window as we handle manually
        non_broadcast_win.set(Window::DimX, Window::Dimension(0, 1, 1));

        Iterator broadcast_input(broadcast_tensor, broadcast_win);
        Iterator non_broadcast_input(non_broadcast_tensor, non_broadcast_win);
        Iterator output(dst, win);

        execute_window_loop(win, [&](const Coordinates &)
        {
            const auto non_broadcast_input_ptr = reinterpret_cast<const int8_t *>(non_broadcast_input.ptr());
            const auto output_ptr              = reinterpret_cast<int8_t *>(output.ptr());

            // The broadcast operand contributes one scalar for the whole row;
            // splat it across a full int8 vector once, outside the inner loop.
            const int8_t broadcast_value     = *reinterpret_cast<const int8_t *>(broadcast_input.ptr());
            const auto   broadcast_value_vec = svdup_n_s8(broadcast_value);

            int      x  = window_start_x;
            svbool_t pg = svwhilelt_b8(x, window_end_x);
            // Dequantize the broadcast vector once per row. Each int8 vector
            // expands to four f32 vectors: svmovlb/svmovlt widen the
            // even-indexed (bottom) / odd-indexed (top) lanes respectively,
            // applied twice (s8->s16, then s16->s32).
            const auto bf_0 = svmul_f32_z(pg, svcvt_f32_s32_z(pg, svsub_s32_z(pg, svmovlb_s32(svmovlb_s16(broadcast_value_vec)), voffset2)), vscale2);
            const auto bf_1 = svmul_f32_z(pg, svcvt_f32_s32_z(pg, svsub_s32_z(pg, svmovlt_s32(svmovlb_s16(broadcast_value_vec)), voffset2)), vscale2);
            const auto bf_2 = svmul_f32_z(pg, svcvt_f32_s32_z(pg, svsub_s32_z(pg, svmovlb_s32(svmovlt_s16(broadcast_value_vec)), voffset2)), vscale2);
            const auto bf_3 = svmul_f32_z(pg, svcvt_f32_s32_z(pg, svsub_s32_z(pg, svmovlt_s32(svmovlt_s16(broadcast_value_vec)), voffset2)), vscale2);

            do
            {
                // Load and dequantize one int8 vector of the non-broadcast input
                // into four f32 vectors (same bottom/top widening scheme as above).
                const auto a    = svld1_s8(pg, non_broadcast_input_ptr + x);
                const auto af_0 = svmul_f32_z(pg, svcvt_f32_s32_z(pg, svsub_s32_z(pg, svmovlb_s32(svmovlb_s16(a)), voffset1)), vscale1);
                const auto af_1 = svmul_f32_z(pg, svcvt_f32_s32_z(pg, svsub_s32_z(pg, svmovlt_s32(svmovlb_s16(a)), voffset1)), vscale1);
                const auto af_2 = svmul_f32_z(pg, svcvt_f32_s32_z(pg, svsub_s32_z(pg, svmovlb_s32(svmovlt_s16(a)), voffset1)), vscale1);
                const auto af_3 = svmul_f32_z(pg, svcvt_f32_s32_z(pg, svsub_s32_z(pg, svmovlt_s32(svmovlt_s16(a)), voffset1)), vscale1);

                // Requantize: out_offset + (a + b) * (1/out_scale), then convert to s32.
                const auto rf_0 = svcvt_s32_f32_z(pg, svmla_f32_z(pg, voffseto, svadd_f32_z(pg, af_0, bf_0), invvscaleo));
                const auto rf_1 = svcvt_s32_f32_z(pg, svmla_f32_z(pg, voffseto, svadd_f32_z(pg, af_1, bf_1), invvscaleo));
                const auto rf_2 = svcvt_s32_f32_z(pg, svmla_f32_z(pg, voffseto, svadd_f32_z(pg, af_2, bf_2), invvscaleo));
                const auto rf_3 = svcvt_s32_f32_z(pg, svmla_f32_z(pg, voffseto, svadd_f32_z(pg, af_3, bf_3), invvscaleo));

                // Saturating narrow back to int8, re-interleaving bottom/top lanes
                // so the output lane order matches the input order.
                const auto pa  = svqxtnt_s32(svqxtnb_s32(rf_0), rf_1);
                const auto pb  = svqxtnt_s32(svqxtnb_s32(rf_2), rf_3);
                const auto res = svqxtnt_s16(svqxtnb_s16(pa), pb);

                svst1_s8(pg, output_ptr + x, res);

                // Advance by the vector length in bytes; the predicate masks the tail.
                x += svcntb();
                pg = svwhilelt_b8(x, window_end_x);
            }
            while(svptest_any(all_true_pg, pg)); // loop while any lane remains active
        },
        broadcast_input, non_broadcast_input, output);
    }
    else
    {
        // Clear X Dimension on execution window as we handle manually
        input1_win.set(Window::DimX, Window::Dimension(0, 1, 1));
        input2_win.set(Window::DimX, Window::Dimension(0, 1, 1));

        Iterator input1(src0, input1_win);
        Iterator input2(src1, input2_win);
        Iterator output(dst, win);

        const auto vscale1  = svdup_n_f32(iq1_info.scale);
        const auto vscale2  = svdup_n_f32(iq2_info.scale);
        const auto voffset1 = svdup_n_s32(iq1_info.offset);
        const auto voffset2 = svdup_n_s32(iq2_info.offset);

        execute_window_loop(win, [&](const Coordinates &)
        {
            const auto input1_ptr = reinterpret_cast<const int8_t *>(input1.ptr());
            const auto input2_ptr = reinterpret_cast<const int8_t *>(input2.ptr());
            const auto output_ptr = reinterpret_cast<int8_t *>(output.ptr());

            int      x  = window_start_x;
            svbool_t pg = svwhilelt_b8(x, window_end_x);
            do
            {
                const auto a = svld1_s8(pg, input1_ptr + x);
                const auto b = svld1_s8(pg, input2_ptr + x);

                // Dequantize both operands: widen s8 -> s32 via bottom/top moves,
                // subtract the input offset, convert to f32 and scale.
                const auto af_0 = svmul_f32_z(pg, svcvt_f32_s32_z(pg, svsub_s32_z(pg, svmovlb_s32(svmovlb_s16(a)), voffset1)), vscale1);
                const auto af_1 = svmul_f32_z(pg, svcvt_f32_s32_z(pg, svsub_s32_z(pg, svmovlt_s32(svmovlb_s16(a)), voffset1)), vscale1);
                const auto af_2 = svmul_f32_z(pg, svcvt_f32_s32_z(pg, svsub_s32_z(pg, svmovlb_s32(svmovlt_s16(a)), voffset1)), vscale1);
                const auto af_3 = svmul_f32_z(pg, svcvt_f32_s32_z(pg, svsub_s32_z(pg, svmovlt_s32(svmovlt_s16(a)), voffset1)), vscale1);

                const auto bf_0 = svmul_f32_z(pg, svcvt_f32_s32_z(pg, svsub_s32_z(pg, svmovlb_s32(svmovlb_s16(b)), voffset2)), vscale2);
                const auto bf_1 = svmul_f32_z(pg, svcvt_f32_s32_z(pg, svsub_s32_z(pg, svmovlt_s32(svmovlb_s16(b)), voffset2)), vscale2);
                const auto bf_2 = svmul_f32_z(pg, svcvt_f32_s32_z(pg, svsub_s32_z(pg, svmovlb_s32(svmovlt_s16(b)), voffset2)), vscale2);
                const auto bf_3 = svmul_f32_z(pg, svcvt_f32_s32_z(pg, svsub_s32_z(pg, svmovlt_s32(svmovlt_s16(b)), voffset2)), vscale2);

                // Requantize: out_offset + (a + b) * (1/out_scale), then convert to s32.
                const auto rf_0 = svcvt_s32_f32_z(pg, svmla_f32_z(pg, voffseto, svadd_f32_z(pg, af_0, bf_0), invvscaleo));
                const auto rf_1 = svcvt_s32_f32_z(pg, svmla_f32_z(pg, voffseto, svadd_f32_z(pg, af_1, bf_1), invvscaleo));
                const auto rf_2 = svcvt_s32_f32_z(pg, svmla_f32_z(pg, voffseto, svadd_f32_z(pg, af_2, bf_2), invvscaleo));
                const auto rf_3 = svcvt_s32_f32_z(pg, svmla_f32_z(pg, voffseto, svadd_f32_z(pg, af_3, bf_3), invvscaleo));

                // Saturating narrow s32 -> s16 -> s8, restoring original lane order.
                const auto pa  = svqxtnt_s32(svqxtnb_s32(rf_0), rf_1);
                const auto pb  = svqxtnt_s32(svqxtnb_s32(rf_2), rf_3);
                const auto res = svqxtnt_s16(svqxtnb_s16(pa), pb);

                svst1_s8(pg, output_ptr + x, res);

                // Advance by the vector length in bytes; the predicate masks the tail.
                x += svcntb();
                pg = svwhilelt_b8(x, window_end_x);
            }
            while(svptest_any(svptrue_b8(), pg)); // loop while any lane remains active
        },
        input1, input2, output);
    }
}
} // namespace cpu
} // namespace arm_compute
void add_qasymm8_signed_sve2(const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window)
SimpleTensor< float > b
Definition: DFT.cpp:157
constexpr int step() const
Return the step of the dimension.
Definition: Window.h:107
Quantization info when assuming per layer quantization.
Describe one of the image's dimensions with a start, end and step.
Definition: Window.h:79
Interface for CPU tensor.
Definition: ITensor.h:36
Copyright (c) 2017-2022 Arm Limited.
T x() const
Alias to access the size of the first dimension.
Definition: Dimensions.h:87
static constexpr size_t DimX
Alias for dimension 0 also known as X dimension.
Definition: Window.h:43
#define ARM_COMPUTE_UNUSED(...)
To avoid unused variables warnings.
Definition: Error.h:152
virtual const TensorShape & tensor_shape() const =0
Size for each dimension of the tensor.
Coordinates of an item.
Definition: Coordinates.h:37
UniformQuantizationInfo uniform() const
Return per layer quantization info.
virtual ITensorInfo * info() const =0
Interface to be implemented by the child class to return the tensor's metadata.
constexpr uint8_t * ptr() const
Return a pointer to the current pixel.
Definition: Helpers.inl:139
void set(size_t dimension, const Dimension &dim)
Set the values of a given dimension.
Definition: Window.inl:49
Window broadcast_if_dimension_le_one(const TensorShape &shape) const
Don't advance in the dimension where shape is less than or equal to 1.
Definition: Window.inl:120
virtual QuantizationInfo quantization_info() const =0
Get the quantization settings (scale and offset) of the tensor.
void execute_window_loop(const Window &w, L &&lambda_function, Ts &&... iterators)
Iterate through the passed window, automatically adjusting the iterators and calling the lambda_funct...
Definition: Helpers.inl:77
constexpr int end() const
Return the end of the dimension.
Definition: Window.h:102
Iterator updated by execute_window_loop for each window element.
Definition: Helpers.h:46
constexpr int start() const
Return the start of the dimension.
Definition: Window.h:97
Describe a multidimensional execution window.
Definition: Window.h:39
ConvertPolicy
Policy to handle integer overflow.
Definition: Types.h:404
constexpr const Dimension & x() const
Alias to access the first dimension of the window.
Definition: Window.h:159