Compute Library
 22.11
qasymm8_signed.cpp
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2020-2022 Arm Limited.
3  *
4  * SPDX-License-Identifier: MIT
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a copy
7  * of this software and associated documentation files (the "Software"), to
8  * deal in the Software without restriction, including without limitation the
9  * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10  * sell copies of the Software, and to permit persons to whom the Software is
11  * furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in all
14  * copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22  * SOFTWARE.
23  */
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/Window.h"
#include "src/core/NEON/NEAsymm.h"
#include "src/core/NEON/NEMath.h"
#include "src/core/NEON/wrapper/wrapper.h"

#include <arm_neon.h>
#include <cmath>
#include <cstddef>
33 
34 namespace arm_compute
35 {
36 namespace cpu
37 {
38 void neon_qasymm8_signed_activation(const ITensor *src, ITensor *dst, const ActivationLayerInfo &act_info, const Window &window)
39 {
40  constexpr int window_step_x = 16;
41  const auto window_start_x = static_cast<int>(window.x().start());
42  const auto window_end_x = static_cast<int>(window.x().end());
44 
45  Window win_collapsed = window.collapse_if_possible(window, Window::DimZ);
46  win_collapsed.set(Window::DimX, Window::Dimension(0, 1, 1));
47 
48  Iterator input(src, win_collapsed);
49  Iterator output(dst, win_collapsed);
50 
51  const UniformQuantizationInfo qi_in = src->info()->quantization_info().uniform();
52  const UniformQuantizationInfo qi_out = dst->info()->quantization_info().uniform();
53  const qasymm8x16_signed_t va = vdupq_n_s8(quantize_qasymm8_signed(act_info.a(), qi_in));
54  const qasymm8x16_signed_t vb = vdupq_n_s8(quantize_qasymm8_signed(act_info.b(), qi_in));
55  const qasymm8_signed_t a = quantize_qasymm8_signed(act_info.a(), qi_in);
56  const qasymm8_signed_t b = quantize_qasymm8_signed(act_info.b(), qi_in);
57  const qasymm8_signed_t const_0 = quantize_qasymm8_signed(0.f, qi_in);
58  const qasymm8x16_signed_t vconst_0 = vdupq_n_s8(const_0);
59 #ifndef __aarch64__
60  const auto vconst_1 = vdupq_n_f32(1.f);
61  const auto vconst_0_f32 = vdupq_n_f32(0.f);
62 #endif // __aarch64__
63  const float32x4_t va_f32 = vdupq_n_f32(act_info.a());
64  const float32x4_t vb_f32 = vdupq_n_f32(act_info.b());
65  const float a_f32 = act_info.a();
66  const float b_f32 = act_info.b();
67  const auto const_6_f32 = vdupq_n_f32(6.f);
68  const auto const_0_f32 = vdupq_n_f32(0.f);
69  const auto const_3_f32 = vdupq_n_f32(3.f);
70  const auto const_inv_6_f32 = vdupq_n_f32(0.166666667f);
71 
72  // Initialise scale/offset for re-quantization
73  float s = qi_in.scale / qi_out.scale;
74  float o = -qi_in.offset * s + qi_out.offset;
75  float32x4_t vs = vdupq_n_f32(s);
76  float32x4_t vo = vdupq_n_f32(o);
77 
78  execute_window_loop(win_collapsed, [&](const Coordinates &)
79  {
80  const auto input_ptr = reinterpret_cast<const qasymm8_signed_t *>(input.ptr());
81  const auto output_ptr = reinterpret_cast<qasymm8_signed_t *>(output.ptr());
82 
84 
85  // Compute S elements per iteration
86  int x = window_start_x;
87  for(; x <= (window_end_x - window_step_x); x += window_step_x)
88  {
89  const auto vin = wrapper::vloadq(input_ptr + x);
91  {
92  // Perform activation
93  tmp = vmaxq_s8(vconst_0, vin);
94  // Re-quantize to new output space
95  tmp = vmlaq_qasymm8_signed(tmp, vs, vo);
96  }
98  {
99  // Perform activation
100  tmp = vminq_s8(va, vmaxq_s8(vconst_0, vin));
101  // Re-quantize to new output space
102  tmp = vmlaq_qasymm8_signed(tmp, vs, vo);
103  }
105  {
106  // Perform activation
107  tmp = vminq_s8(va, vmaxq_s8(vb, vin));
108  // Re-quantize to new output space
109  tmp = vmlaq_qasymm8_signed(tmp, vs, vo);
110  }
111 #ifndef __aarch64__ // LUT-based implementation is used for aarch64 instead.
113  {
114  // De-quantize
115  const auto vin_deq = vdequantize(vin, qi_in);
116  // Perform activation
117  const float32x4x4_t tmp_dep =
118  {
119  {
120  wrapper::vdiv(vconst_1, wrapper::vadd(vconst_1, wrapper::vexpq(wrapper::vneg(vin_deq.val[0])))),
121  wrapper::vdiv(vconst_1, wrapper::vadd(vconst_1, wrapper::vexpq(wrapper::vneg(vin_deq.val[1])))),
122  wrapper::vdiv(vconst_1, wrapper::vadd(vconst_1, wrapper::vexpq(wrapper::vneg(vin_deq.val[2])))),
123  wrapper::vdiv(vconst_1, wrapper::vadd(vconst_1, wrapper::vexpq(wrapper::vneg(vin_deq.val[3])))),
124  }
125  };
126  // Re-quantize to new output space
127  tmp = vquantize_signed(tmp_dep, qi_out);
128  }
129 #endif // __aarch64__
131  {
132  // De-quantize
133  const auto vin_deq = vdequantize(vin, qi_in);
134  // Perform activation
135  const float32x4x4_t tmp_dep =
136  {
137  {
138  wrapper::vmul(va_f32, wrapper::vtanh(wrapper::vmul(vin_deq.val[0], vb_f32))),
139  wrapper::vmul(va_f32, wrapper::vtanh(wrapper::vmul(vin_deq.val[1], vb_f32))),
140  wrapper::vmul(va_f32, wrapper::vtanh(wrapper::vmul(vin_deq.val[2], vb_f32))),
141  wrapper::vmul(va_f32, wrapper::vtanh(wrapper::vmul(vin_deq.val[3], vb_f32))),
142  }
143  };
144  // Re-quantize to new output space
145  tmp = vquantize_signed(tmp_dep, qi_out);
146  }
148  {
149  // De-quantize
150  const auto vin_deq = vdequantize(vin, qi_in);
151  // Perform activation
152  const float32x4x4_t tmp_dep =
153  {
154  {
155  wrapper::vmul(vin_deq.val[0], wrapper::vmul(const_inv_6_f32, wrapper::vmin(const_6_f32, wrapper::vmax(const_0_f32, wrapper::vadd(vin_deq.val[0], const_3_f32))))),
156  wrapper::vmul(vin_deq.val[1], wrapper::vmul(const_inv_6_f32, wrapper::vmin(const_6_f32, wrapper::vmax(const_0_f32, wrapper::vadd(vin_deq.val[1], const_3_f32))))),
157  wrapper::vmul(vin_deq.val[2], wrapper::vmul(const_inv_6_f32, wrapper::vmin(const_6_f32, wrapper::vmax(const_0_f32, wrapper::vadd(vin_deq.val[2], const_3_f32))))),
158  wrapper::vmul(vin_deq.val[3], wrapper::vmul(const_inv_6_f32, wrapper::vmin(const_6_f32, wrapper::vmax(const_0_f32, wrapper::vadd(vin_deq.val[3], const_3_f32))))),
159  }
160  };
161  // Re-quantize to new output space
162  tmp = vquantize_signed(tmp_dep, qi_out);
163  }
165  {
166  const auto vin_deq = vdequantize(vin, qi_in);
167 
168 #ifdef __aarch64__
169  const uint32x4x4_t pos_mask =
170  {
171  {
172  wrapper::vcgtz(vin_deq.val[0]),
173  wrapper::vcgtz(vin_deq.val[1]),
174  wrapper::vcgtz(vin_deq.val[2]),
175  wrapper::vcgtz(vin_deq.val[3]),
176  }
177  };
178 #else // __aarch64__
179  const uint32x4x4_t pos_mask =
180  {
181  {
182  wrapper::vcgt(vin_deq.val[0], vconst_0_f32),
183  wrapper::vcgt(vin_deq.val[1], vconst_0_f32),
184  wrapper::vcgt(vin_deq.val[2], vconst_0_f32),
185  wrapper::vcgt(vin_deq.val[3], vconst_0_f32),
186  }
187  };
188 #endif // __aarch64__
189 
190  const float32x4x4_t tmp_dep =
191  {
192  {
193  wrapper::vbsl(pos_mask.val[0], vin_deq.val[0], wrapper::vmul(va_f32, vin_deq.val[0])),
194  wrapper::vbsl(pos_mask.val[1], vin_deq.val[1], wrapper::vmul(va_f32, vin_deq.val[1])),
195  wrapper::vbsl(pos_mask.val[2], vin_deq.val[2], wrapper::vmul(va_f32, vin_deq.val[2])),
196  wrapper::vbsl(pos_mask.val[3], vin_deq.val[3], wrapper::vmul(va_f32, vin_deq.val[3])),
197  }
198  };
199 
200  tmp = vquantize_signed(tmp_dep, qi_out);
201  }
202  else
203  {
204  ARM_COMPUTE_ERROR("Unsupported activation function");
205  }
206  wrapper::vstore(output_ptr + x, tmp);
207  }
208 
209  // Compute left-over elements
210  for(; x < window_end_x; ++x)
211  {
212  qasymm8_signed_t in = *(reinterpret_cast<const qasymm8_signed_t *>(input_ptr + x));
213  qasymm8_signed_t tmp = 0;
215  {
216  tmp = std::max(const_0, in);
217  tmp = utility::clamp<int32_t, qasymm8_signed_t>(tmp * s + o);
218  }
220  {
221  tmp = std::min(a, std::max(const_0, in));
222  tmp = utility::clamp<int32_t, qasymm8_signed_t>(tmp * s + o);
223  }
225  {
226  tmp = std::min(a, std::max(b, in));
227  tmp = utility::clamp<int32_t, qasymm8_signed_t>(tmp * s + o);
228  }
229 #ifndef __aarch64__ // LUT-based implementation is used for aarch64 instead.
231  {
232  float tmp_f = dequantize_qasymm8_signed(in, qi_in);
233  tmp_f = 1.f / (1.f + std::exp(-tmp_f));
234  tmp = quantize_qasymm8_signed(tmp_f, qi_out);
235  }
236 #endif // __aarch64__
238  {
239  float tmp_f = dequantize_qasymm8_signed(in, qi_in);
240  tmp_f = a_f32 * std::tanh(b_f32 * tmp_f);
241  tmp = quantize_qasymm8_signed(tmp_f, qi_out);
242  }
244  {
245  float tmp_f = dequantize_qasymm8_signed(in, qi_in);
246  tmp_f = tmp_f * ((std::min(std::max((tmp_f + 3), 0.0f), 6.0f)) * 0.166666667f);
247  tmp = quantize_qasymm8_signed(tmp_f, qi_out);
248  }
250  {
251  float tmp_f = dequantize_qasymm8_signed(in, qi_in);
252  tmp_f = tmp_f > 0 ? tmp_f : tmp_f * a_f32;
253  tmp = quantize_qasymm8_signed(tmp_f, qi_out);
254  }
255  else
256  {
257  ARM_COMPUTE_ERROR("Unsupported activation function");
258  }
259  *(output_ptr + x) = tmp;
260  }
261  },
262  input, output);
263 }
264 } // namespace cpu
265 } // namespace arm_compute
float32x2_t vdiv(const float32x2_t &a, const float32x2_t &b)
Definition: div.h:58
float32x4_t vtanh(const float32x4_t &a)
Definition: tanh.h:40
SimpleTensor< float > b
Definition: DFT.cpp:157
#define ARM_COMPUTE_ERROR(msg)
Print the given message then throw an std::runtime_error.
Definition: Error.h:352
float32x4x2_t vdequantize(const uint8x8_t &qv, const UniformQuantizationInfo &qi)
Dequantize a neon vector holding 8 quantized values.
Definition: NEAsymm.h:415
float a() const
Get the alpha value.
Definition: Types.h:1684
uint8x16_t vloadq(const uint8_t *ptr)
Definition: load.h:58
uint8x8_t vadd(const uint8x8_t &a, const uint8x8_t &b)
Definition: add.h:39
void neon_qasymm8_signed_activation(const ITensor *src, ITensor *dst, const ActivationLayerInfo &act_info, const Window &window)
Quantization info when assuming per layer quantization.
Describe one of the image's dimensions with a start, end and step.
Definition: Window.h:79
Activation Layer Information class.
Definition: Types.h:1639
Interface for CPU tensor.
Definition: ITensor.h:36
SimpleTensor< float > src
Definition: DFT.cpp:155
Copyright (c) 2017-2022 Arm Limited.
ActivationFunction
Available activation functions.
Definition: Types.h:1643
static constexpr size_t DimX
Alias for dimension 0 also known as X dimension.
Definition: Window.h:43
Window collapse_if_possible(const Window &full_window, size_t first, size_t last, bool *has_collapsed=nullptr) const
Collapse the dimensions between first and last if possible.
Definition: Window.inl:68
int8_t quantize_qasymm8_signed(float value, const INFO_TYPE &qinfo, RoundingPolicy rounding_policy=RoundingPolicy::TO_NEAREST_UP)
Quantize a value given a signed 8-bit asymmetric quantization scheme.
uint8x8_t vmin(const uint8x8_t &a, const uint8x8_t &b)
Definition: min.h:39
Coordinates of an item.
Definition: Coordinates.h:37
UniformQuantizationInfo uniform() const
Return per layer quantization info.
virtual ITensorInfo * info() const =0
Interface to be implemented by the child class to return the tensor's metadata.
constexpr uint8_t * ptr() const
Return a pointer to the current pixel.
Definition: Helpers.inl:139
void set(size_t dimension, const Dimension &dim)
Set the values of a given dimension.
Definition: Window.inl:49
virtual QuantizationInfo quantization_info() const =0
Get the quantization settings (scale and offset) of the tensor.
int8x8_t vneg(const int8x8_t &a)
Definition: neg.h:39
int8x16_t vmlaq_qasymm8_signed(qasymm8x16_signed_t vd, float32x4_t vs, float32x4_t vo)
Perform a multiply-accumulate on all 16 components of a QASYMM8_SIGNED vector.
Definition: NEAsymm.inl:59
uint8x8_t vcgt(const uint8x8_t &a, const uint8x8_t &b)
Definition: cgt.h:39
uint8x8_t vmul(const uint8x8_t &a, const uint8x8_t &b)
Definition: mul.h:39
uint8x8_t vbsl(const uint8x8_t &a, const uint8x8_t &b, const uint8x8_t &c)
Definition: bsl.h:39
static constexpr size_t DimZ
Alias for dimension 2 also known as Z dimension.
Definition: Window.h:47
int8_t qasymm8_signed_t
8 bit signed quantized asymmetric scalar value
void vstore(uint8_t *ptr, uint8x8_t val)
Definition: store.h:39
float dequantize_qasymm8_signed(int8_t value, const INFO_TYPE &qinfo)
Dequantize a value given a signed 8-bit asymmetric quantization scheme.
void execute_window_loop(const Window &w, L &&lambda_function, Ts &&... iterators)
Iterate through the passed window, automatically adjusting the iterators and calling the lambda_funct...
Definition: Helpers.inl:77
typename neon_bitvector< T, BW >::type neon_bitvector_t
Helper type template to get the type of a neon vector.
Definition: traits.h:130
ActivationFunction activation() const
Get the type of activation function.
Definition: Types.h:1679
float b() const
Get the beta value.
Definition: Types.h:1689
Includes all wrapper headers at once.
int8x8_t vquantize_signed(const float32x4x2_t &qv, const UniformQuantizationInfo &qi)
Quantize a neon vector holding 8 floating point values.
Definition: NEAsymm.h:630
int8x16_t qasymm8x16_signed_t
8 bit quantized signed asymmetric vector with 16 elements
Definition: NEAsymm.h:43
constexpr int end() const
Return the end of the dimension.
Definition: Window.h:102
Iterator updated by execute_window_loop for each window element.
Definition: Helpers.h:46
uint8x8_t vmax(const uint8x8_t &a, const uint8x8_t &b)
Definition: max.h:39
constexpr int start() const
Return the start of the dimension.
Definition: Window.h:97
float32x4_t vexpq(const float32x4_t &a)
Definition: exp.h:47
Describe a multidimensional execution window.
Definition: Window.h:39
constexpr const Dimension & x() const
Alias to access the first dimension of the window.
Definition: Window.h:159