Compute Library
 22.11
qasymm8.cpp
/*
 * Copyright (c) 2020-2022 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/ITensor.h"
#include "arm_compute/core/Types.h"
#include "src/core/NEON/SVEMath.h"

#include <arm_sve.h>

namespace arm_compute
{
namespace cpu
{
void add_qasymm8_sve2(const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window)
{
    ARM_COMPUTE_UNUSED(policy);

    // Create input windows
    Window input1_win = window.broadcast_if_dimension_le_one(src0->info()->tensor_shape());
    Window input2_win = window.broadcast_if_dimension_le_one(src1->info()->tensor_shape());

    // Clear X Dimension on execution window as we handle it manually
    Window win = window;
    win.set(Window::DimX, Window::Dimension(0, 1, 1));

    const auto window_start_x        = static_cast<int>(window.x().start());
    const auto window_end_x          = static_cast<int>(window.x().end());
    const bool is_broadcast_across_x = src0->info()->tensor_shape().x() != src1->info()->tensor_shape().x();
    const auto all_true_pg           = svptrue_b8();

    const UniformQuantizationInfo iq1_info = src0->info()->quantization_info().uniform();
    const UniformQuantizationInfo iq2_info = src1->info()->quantization_info().uniform();
    const UniformQuantizationInfo oq_info  = dst->info()->quantization_info().uniform();

    const auto invvscaleo = svdup_n_f32(1.f / oq_info.scale);
    const auto voffseto   = svdup_n_f32(oq_info.offset);

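    // Both branches below use the same scheme: widen each u8 input vector to four f32 vectors,
    // dequantize as (value - input_offset) * input_scale, add, then requantize with the output
    // parameters and saturate back down to u8.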
    if(is_broadcast_across_x)
    {
        const bool     is_broadcast_input_2 = input2_win.x().step() == 0;
        Window         broadcast_win        = is_broadcast_input_2 ? input2_win : input1_win;
        Window         non_broadcast_win    = !is_broadcast_input_2 ? input2_win : input1_win;
        const ITensor *broadcast_tensor     = is_broadcast_input_2 ? src1 : src0;
        const ITensor *non_broadcast_tensor = !is_broadcast_input_2 ? src1 : src0;

        const svfloat32_t vscale1  = is_broadcast_input_2 ? svdup_n_f32(iq1_info.scale) : svdup_n_f32(iq2_info.scale);
        const svfloat32_t vscale2  = is_broadcast_input_2 ? svdup_n_f32(iq2_info.scale) : svdup_n_f32(iq1_info.scale);
        const svint32_t   voffset1 = is_broadcast_input_2 ? svdup_n_s32(iq1_info.offset) : svdup_n_s32(iq2_info.offset);
        const svint32_t   voffset2 = is_broadcast_input_2 ? svdup_n_s32(iq2_info.offset) : svdup_n_s32(iq1_info.offset);

        // Clear X Dimension on execution window as we handle it manually
        non_broadcast_win.set(Window::DimX, Window::Dimension(0, 1, 1));

        Iterator broadcast_input(broadcast_tensor, broadcast_win);
        Iterator non_broadcast_input(non_broadcast_tensor, non_broadcast_win);
        Iterator output(dst, win);

        execute_window_loop(win, [&](const Coordinates &)
        {
            const auto non_broadcast_input_ptr = reinterpret_cast<const uint8_t *>(non_broadcast_input.ptr());
            const auto output_ptr              = reinterpret_cast<uint8_t *>(output.ptr());

            const uint8_t   broadcast_value     = *reinterpret_cast<const uint8_t *>(broadcast_input.ptr());
            const svuint8_t broadcast_value_vec = svdup_n_u8(broadcast_value);

            int      x  = window_start_x;
            svbool_t pg = svwhilelt_b8(x, window_end_x);

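            // The broadcast value is constant along the row, so it is dequantized once up front.
            // svmovlb/svmovlt widen the even/odd u8 lanes to u16 and again to u32, so each u8
            // vector expands into four f32 vectors.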
            const auto bf_0 = svmul_f32_z(pg, svcvt_f32_s32_z(pg, svsub_s32_z(pg, svreinterpret_s32_u32(svmovlb_u32(svmovlb_u16(broadcast_value_vec))), voffset2)), vscale2);
            const auto bf_1 = svmul_f32_z(pg, svcvt_f32_s32_z(pg, svsub_s32_z(pg, svreinterpret_s32_u32(svmovlt_u32(svmovlb_u16(broadcast_value_vec))), voffset2)), vscale2);
            const auto bf_2 = svmul_f32_z(pg, svcvt_f32_s32_z(pg, svsub_s32_z(pg, svreinterpret_s32_u32(svmovlb_u32(svmovlt_u16(broadcast_value_vec))), voffset2)), vscale2);
            const auto bf_3 = svmul_f32_z(pg, svcvt_f32_s32_z(pg, svsub_s32_z(pg, svreinterpret_s32_u32(svmovlt_u32(svmovlt_u16(broadcast_value_vec))), voffset2)), vscale2);

            do
            {
                const svuint8_t a = svld1_u8(pg, non_broadcast_input_ptr + x);

                const auto af_0 = svmul_f32_z(pg, svcvt_f32_s32_z(pg, svsub_s32_z(pg, svreinterpret_s32_u32(svmovlb_u32(svmovlb_u16(a))), voffset1)), vscale1);
                const auto af_1 = svmul_f32_z(pg, svcvt_f32_s32_z(pg, svsub_s32_z(pg, svreinterpret_s32_u32(svmovlt_u32(svmovlb_u16(a))), voffset1)), vscale1);
                const auto af_2 = svmul_f32_z(pg, svcvt_f32_s32_z(pg, svsub_s32_z(pg, svreinterpret_s32_u32(svmovlb_u32(svmovlt_u16(a))), voffset1)), vscale1);
                const auto af_3 = svmul_f32_z(pg, svcvt_f32_s32_z(pg, svsub_s32_z(pg, svreinterpret_s32_u32(svmovlt_u32(svmovlt_u16(a))), voffset1)), vscale1);

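                // Requantize: result = output_offset + (a + b) / output_scale, computed as a
                // multiply-accumulate, then converted back to unsigned integers.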
                const auto rf_0 = svcvt_u32_f32_z(pg, svmla_f32_z(pg, voffseto, svadd_f32_z(pg, af_0, bf_0), invvscaleo));
                const auto rf_1 = svcvt_u32_f32_z(pg, svmla_f32_z(pg, voffseto, svadd_f32_z(pg, af_1, bf_1), invvscaleo));
                const auto rf_2 = svcvt_u32_f32_z(pg, svmla_f32_z(pg, voffseto, svadd_f32_z(pg, af_2, bf_2), invvscaleo));
                const auto rf_3 = svcvt_u32_f32_z(pg, svmla_f32_z(pg, voffseto, svadd_f32_z(pg, af_3, bf_3), invvscaleo));

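                // svqxtnb/svqxtnt saturate and narrow u32 -> u16 -> u8, interleaving the
                // bottom/top halves back into their original lane order.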
                const auto pa = svqxtnt_u32(svqxtnb_u32(rf_0), rf_1);
                const auto pb = svqxtnt_u32(svqxtnb_u32(rf_2), rf_3);

                const auto res = svqxtnt_u16(svqxtnb_u16(pa), pb);
                svst1_u8(pg, output_ptr + x, res);

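                // Advance by the number of u8 lanes in an SVE vector; the predicate covers any
                // tail elements and the loop ends once no lanes remain active.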
                x += svcntb();
                pg = svwhilelt_b8(x, window_end_x);
            }
            while(svptest_any(all_true_pg, pg));
        },
        broadcast_input, non_broadcast_input, output);
    }
    else
    {
        // Clear X Dimension on execution window as we handle it manually
        input1_win.set(Window::DimX, Window::Dimension(0, 1, 1));
        input2_win.set(Window::DimX, Window::Dimension(0, 1, 1));

        Iterator input1(src0, input1_win);
        Iterator input2(src1, input2_win);
        Iterator output(dst, win);

        const auto vscale1  = svdup_n_f32(iq1_info.scale);
        const auto vscale2  = svdup_n_f32(iq2_info.scale);
        const auto voffset1 = svdup_n_s32(iq1_info.offset);
        const auto voffset2 = svdup_n_s32(iq2_info.offset);

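        // Same dequantize-add-requantize computation as in the broadcast branch, but both
        // operands are loaded from memory on every iteration.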
        execute_window_loop(win, [&](const Coordinates &)
        {
            const auto input1_ptr = reinterpret_cast<const uint8_t *>(input1.ptr());
            const auto input2_ptr = reinterpret_cast<const uint8_t *>(input2.ptr());
            const auto output_ptr = reinterpret_cast<uint8_t *>(output.ptr());

            int      x  = window_start_x;
            svbool_t pg = svwhilelt_b8(x, window_end_x);
            do
            {
                const auto a = svld1_u8(pg, input1_ptr + x);
                const auto b = svld1_u8(pg, input2_ptr + x);

                const auto af_0 = svmul_f32_z(pg, svcvt_f32_s32_z(pg, svsub_s32_z(pg, svreinterpret_s32_u32(svmovlb_u32(svmovlb_u16(a))), voffset1)), vscale1);
                const auto af_1 = svmul_f32_z(pg, svcvt_f32_s32_z(pg, svsub_s32_z(pg, svreinterpret_s32_u32(svmovlt_u32(svmovlb_u16(a))), voffset1)), vscale1);
                const auto af_2 = svmul_f32_z(pg, svcvt_f32_s32_z(pg, svsub_s32_z(pg, svreinterpret_s32_u32(svmovlb_u32(svmovlt_u16(a))), voffset1)), vscale1);
                const auto af_3 = svmul_f32_z(pg, svcvt_f32_s32_z(pg, svsub_s32_z(pg, svreinterpret_s32_u32(svmovlt_u32(svmovlt_u16(a))), voffset1)), vscale1);

                const auto bf_0 = svmul_f32_z(pg, svcvt_f32_s32_z(pg, svsub_s32_z(pg, svreinterpret_s32_u32(svmovlb_u32(svmovlb_u16(b))), voffset2)), vscale2);
                const auto bf_1 = svmul_f32_z(pg, svcvt_f32_s32_z(pg, svsub_s32_z(pg, svreinterpret_s32_u32(svmovlt_u32(svmovlb_u16(b))), voffset2)), vscale2);
                const auto bf_2 = svmul_f32_z(pg, svcvt_f32_s32_z(pg, svsub_s32_z(pg, svreinterpret_s32_u32(svmovlb_u32(svmovlt_u16(b))), voffset2)), vscale2);
                const auto bf_3 = svmul_f32_z(pg, svcvt_f32_s32_z(pg, svsub_s32_z(pg, svreinterpret_s32_u32(svmovlt_u32(svmovlt_u16(b))), voffset2)), vscale2);

                const auto rf_0 = svcvt_u32_f32_z(pg, svmla_f32_z(pg, voffseto, svadd_f32_z(pg, af_0, bf_0), invvscaleo));
                const auto rf_1 = svcvt_u32_f32_z(pg, svmla_f32_z(pg, voffseto, svadd_f32_z(pg, af_1, bf_1), invvscaleo));
                const auto rf_2 = svcvt_u32_f32_z(pg, svmla_f32_z(pg, voffseto, svadd_f32_z(pg, af_2, bf_2), invvscaleo));
                const auto rf_3 = svcvt_u32_f32_z(pg, svmla_f32_z(pg, voffseto, svadd_f32_z(pg, af_3, bf_3), invvscaleo));

                const auto pa  = svqxtnt_u32(svqxtnb_u32(rf_0), rf_1);
                const auto pb  = svqxtnt_u32(svqxtnb_u32(rf_2), rf_3);
                const auto res = svqxtnt_u16(svqxtnb_u16(pa), pb);

                svst1_u8(pg, output_ptr + x, res);

                x += svcntb();
                pg = svwhilelt_b8(x, window_end_x);
            }
            while(svptest_any(all_true_pg, pg));
        },
        input1, input2, output);
    }
}
} // namespace cpu
} // namespace arm_compute
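
A minimal usage sketch, for context: application code does not call add_qasymm8_sve2() directly. It configures arm_compute::NEArithmeticAddition on QASYMM8 tensors, and the operator dispatches to this SVE2 kernel when the build and the CPU support SVE2. The shape and quantization parameters below are illustrative, not taken from this file.

#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/runtime/NEON/functions/NEArithmeticAddition.h"
#include "arm_compute/runtime/Tensor.h"

using namespace arm_compute;

int main()
{
    // Illustrative shape and per-tensor uniform quantization (scale, offset).
    const TensorShape shape(32U, 16U);

    Tensor a, b, out;
    a.allocator()->init(TensorInfo(shape, 1, DataType::QASYMM8, QuantizationInfo(0.05f, 10)));
    b.allocator()->init(TensorInfo(shape, 1, DataType::QASYMM8, QuantizationInfo(0.10f, 5)));
    out.allocator()->init(TensorInfo(shape, 1, DataType::QASYMM8, QuantizationInfo(0.08f, 3)));

    // Configure before allocating; the operator selects the QASYMM8 addition kernel
    // (the SVE2 variant above on SVE2-capable CPUs).
    NEArithmeticAddition add;
    add.configure(&a, &b, &out, ConvertPolicy::SATURATE);

    a.allocator()->allocate();
    b.allocator()->allocate();
    out.allocator()->allocate();

    // ... fill a and b with quantized data ...

    add.run();
    return 0;
}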