Compute Library
 21.02
qasymm8.cpp
/*
 * Copyright (c) 2020-2021 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/Window.h"
#include "src/core/NEON/NEAsymm.h"
#include "src/core/NEON/NEMath.h"
#include "src/core/NEON/wrapper/wrapper.h"

#include <arm_neon.h>
#include <cmath>
#include <cstddef>

namespace arm_compute
{
namespace cpu
{
void qasymm8_neon_activation(const ITensor *src, ITensor *dst, const ActivationLayerInfo &act_info, const Window &window)
{
    constexpr int window_step_x  = 16;
    const auto    window_start_x = static_cast<int>(window.x().start());
    const auto    window_end_x   = static_cast<int>(window.x().end());
    const ActivationLayerInfo::ActivationFunction act = act_info.activation();

    Window win_collapsed = window.collapse_if_possible(window, Window::DimZ);
    win_collapsed.set(Window::DimX, Window::Dimension(0, 1, 1));
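    // DimX is reduced to a single step here because the lambda below walks the X dimension
    // by hand: 16 lanes at a time in the vectorised loop, then element by element for the tail.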

    Iterator input(src, win_collapsed);
    Iterator output(dst, win_collapsed);

    const UniformQuantizationInfo qi_in  = src->info()->quantization_info().uniform();
    const UniformQuantizationInfo qi_out = dst->info()->quantization_info().uniform();
    const qasymm8x16_t            va       = vdupq_n_u8(quantize_qasymm8(act_info.a(), qi_in));
    const qasymm8x16_t            vb       = vdupq_n_u8(quantize_qasymm8(act_info.b(), qi_in));
    const qasymm8_t               a        = quantize_qasymm8(act_info.a(), qi_in);
    const qasymm8_t               b        = quantize_qasymm8(act_info.b(), qi_in);
    const qasymm8_t               const_0  = quantize_qasymm8(0.f, qi_in);
    const qasymm8x16_t            vconst_0 = vdupq_n_u8(const_0);
    const auto                    vconst_1 = vdupq_n_f32(1.f);
#ifndef __aarch64__
    const auto vconst_0_f32 = vdupq_n_f32(0);
#endif // __aarch64__
    const float32x4_t va_f32 = vdupq_n_f32(act_info.a());
    const float32x4_t vb_f32 = vdupq_n_f32(act_info.b());
    const float       a_f32  = act_info.a();
    const float       b_f32  = act_info.b();
    const auto        const_6_f32     = vdupq_n_f32(6.f);
    const auto        const_0_f32     = vdupq_n_f32(0.f);
    const auto        const_3_f32     = vdupq_n_f32(3.f);
    const auto        const_inv_6_f32 = vdupq_n_f32(0.166666667f);

    // Initialise scale/offset for re-quantization
    float       s  = qi_in.scale / qi_out.scale;
    float       o  = -qi_in.offset * s + qi_out.offset;
    float32x4_t vs = vdupq_n_f32(s);
    float32x4_t vo = vdupq_n_f32(o);

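    // The scale/offset pair above folds the round trip dequantize(qi_in) -> quantize(qi_out)
    // into a single affine map on the quantized values:
    //   x     = (q_in - offset_in) * scale_in
    //   q_out = x / scale_out + offset_out
    //         = q_in * (scale_in / scale_out) + (offset_out - offset_in * scale_in / scale_out)
    //         = q_in * s + o
    // which is exactly what vmlaq_qasymm8(tmp, vs, vo) applies in the ReLU-family branches below.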
    execute_window_loop(win_collapsed, [&](const Coordinates &)
    {
        const auto input_ptr  = reinterpret_cast<const qasymm8_t *>(input.ptr());
        const auto output_ptr = reinterpret_cast<qasymm8_t *>(output.ptr());

        wrapper::traits::neon_bitvector_t<qasymm8_t, wrapper::traits::BitWidth::W128> tmp;

        // Compute S elements per iteration
        int x = window_start_x;
        for(; x <= (window_end_x - window_step_x); x += window_step_x)
        {
            const auto vin = wrapper::vloadq(input_ptr + x);
            if(act == ActivationLayerInfo::ActivationFunction::RELU)
            {
                // Perform activation
                tmp = vmaxq_u8(vconst_0, vin);
                // Re-quantize to new output space
                tmp = vmlaq_qasymm8(tmp, vs, vo);
            }
            else if(act == ActivationLayerInfo::ActivationFunction::BOUNDED_RELU)
            {
                // Perform activation
                tmp = vminq_u8(va, vmaxq_u8(vconst_0, vin));
                // Re-quantize to new output space
                tmp = vmlaq_qasymm8(tmp, vs, vo);
            }
            else if(act == ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU)
            {
                // Perform activation
                tmp = vminq_u8(va, vmaxq_u8(vb, vin));
                // Re-quantize to new output space
                tmp = vmlaq_qasymm8(tmp, vs, vo);
            }
            else if(act == ActivationLayerInfo::ActivationFunction::LOGISTIC)
            {
                // De-quantize
                const auto vin_deq = vdequantize(vin, qi_in);
                // Perform activation: 1 / (1 + exp(-x))
                const float32x4x4_t tmp_dep =
                {
                    {
                        wrapper::vdiv(vconst_1, wrapper::vadd(vconst_1, wrapper::vexpq(wrapper::vneg(vin_deq.val[0])))),
                        wrapper::vdiv(vconst_1, wrapper::vadd(vconst_1, wrapper::vexpq(wrapper::vneg(vin_deq.val[1])))),
                        wrapper::vdiv(vconst_1, wrapper::vadd(vconst_1, wrapper::vexpq(wrapper::vneg(vin_deq.val[2])))),
                        wrapper::vdiv(vconst_1, wrapper::vadd(vconst_1, wrapper::vexpq(wrapper::vneg(vin_deq.val[3])))),
                    }
                };
                // Re-quantize to new output space
                tmp = vquantize(tmp_dep, qi_out);
            }
            else if(act == ActivationLayerInfo::ActivationFunction::TANH)
            {
                // De-quantize
                const auto vin_deq = vdequantize(vin, qi_in);
                // Perform activation: a * tanh(b * x)
                const float32x4x4_t tmp_dep =
                {
                    {
                        wrapper::vmul(va_f32, wrapper::vtanh(wrapper::vmul(vin_deq.val[0], vb_f32))),
                        wrapper::vmul(va_f32, wrapper::vtanh(wrapper::vmul(vin_deq.val[1], vb_f32))),
                        wrapper::vmul(va_f32, wrapper::vtanh(wrapper::vmul(vin_deq.val[2], vb_f32))),
                        wrapper::vmul(va_f32, wrapper::vtanh(wrapper::vmul(vin_deq.val[3], vb_f32))),
                    }
                };
                // Re-quantize to new output space
                tmp = vquantize(tmp_dep, qi_out);
            }
            else if(act == ActivationLayerInfo::ActivationFunction::HARD_SWISH)
            {
                // De-quantize
                const auto vin_deq = vdequantize(vin, qi_in);
                // Perform activation: x * min(max(x + 3, 0), 6) / 6
                const float32x4x4_t tmp_dep =
                {
                    {
                        wrapper::vmul(vin_deq.val[0], wrapper::vmul(const_inv_6_f32, wrapper::vmin(const_6_f32, wrapper::vmax(const_0_f32, wrapper::vadd(vin_deq.val[0], const_3_f32))))),
                        wrapper::vmul(vin_deq.val[1], wrapper::vmul(const_inv_6_f32, wrapper::vmin(const_6_f32, wrapper::vmax(const_0_f32, wrapper::vadd(vin_deq.val[1], const_3_f32))))),
                        wrapper::vmul(vin_deq.val[2], wrapper::vmul(const_inv_6_f32, wrapper::vmin(const_6_f32, wrapper::vmax(const_0_f32, wrapper::vadd(vin_deq.val[2], const_3_f32))))),
                        wrapper::vmul(vin_deq.val[3], wrapper::vmul(const_inv_6_f32, wrapper::vmin(const_6_f32, wrapper::vmax(const_0_f32, wrapper::vadd(vin_deq.val[3], const_3_f32))))),
                    }
                };
                // Re-quantize to new output space
                tmp = vquantize(tmp_dep, qi_out);
            }
            else if(act == ActivationLayerInfo::ActivationFunction::LEAKY_RELU)
            {
                const auto vin_deq = vdequantize(vin, qi_in);

#ifdef __aarch64__
                const uint32x4x4_t pos_mask =
                {
                    {
                        wrapper::vcgtz(vin_deq.val[0]),
                        wrapper::vcgtz(vin_deq.val[1]),
                        wrapper::vcgtz(vin_deq.val[2]),
                        wrapper::vcgtz(vin_deq.val[3]),
                    }
                };
#else  // __aarch64__
                const uint32x4x4_t pos_mask =
                {
                    {
                        wrapper::vcgt(vin_deq.val[0], vconst_0_f32),
                        wrapper::vcgt(vin_deq.val[1], vconst_0_f32),
                        wrapper::vcgt(vin_deq.val[2], vconst_0_f32),
                        wrapper::vcgt(vin_deq.val[3], vconst_0_f32),
                    }
                };
#endif // __aarch64__

                // Perform activation: x if x > 0, a * x otherwise
                const float32x4x4_t tmp_dep =
                {
                    {
                        wrapper::vbsl(pos_mask.val[0], vin_deq.val[0], wrapper::vmul(va_f32, vin_deq.val[0])),
                        wrapper::vbsl(pos_mask.val[1], vin_deq.val[1], wrapper::vmul(va_f32, vin_deq.val[1])),
                        wrapper::vbsl(pos_mask.val[2], vin_deq.val[2], wrapper::vmul(va_f32, vin_deq.val[2])),
                        wrapper::vbsl(pos_mask.val[3], vin_deq.val[3], wrapper::vmul(va_f32, vin_deq.val[3])),
                    }
                };

                tmp = vquantize(tmp_dep, qi_out);
            }
            else
            {
                ARM_COMPUTE_ERROR("Unsupported activation function");
            }
            wrapper::vstore(output_ptr + x, tmp);
        }

        // Compute left-over elements
        for(; x < window_end_x; ++x)
        {
            qasymm8_t in  = *(reinterpret_cast<const qasymm8_t *>(input_ptr + x));
            qasymm8_t tmp = 0;
            if(act == ActivationLayerInfo::ActivationFunction::RELU)
            {
                tmp = std::max(const_0, in);
                tmp = utility::clamp<int32_t, qasymm8_t>(tmp * s + o);
            }
            else if(act == ActivationLayerInfo::ActivationFunction::BOUNDED_RELU)
            {
                tmp = std::min(a, std::max(const_0, in));
                tmp = utility::clamp<int32_t, qasymm8_t>(tmp * s + o);
            }
            else if(act == ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU)
            {
                tmp = std::min(a, std::max(b, in));
                tmp = utility::clamp<int32_t, qasymm8_t>(tmp * s + o);
            }
            else if(act == ActivationLayerInfo::ActivationFunction::LOGISTIC)
            {
                float tmp_f = dequantize_qasymm8(in, qi_in);
                tmp_f       = 1.f / (1.f + std::exp(-tmp_f));
                tmp         = quantize_qasymm8(tmp_f, qi_out);
            }
            else if(act == ActivationLayerInfo::ActivationFunction::TANH)
            {
                float tmp_f = dequantize_qasymm8(in, qi_in);
                tmp_f       = a_f32 * std::tanh(b_f32 * tmp_f);
                tmp         = quantize_qasymm8(tmp_f, qi_out);
            }
            else if(act == ActivationLayerInfo::ActivationFunction::HARD_SWISH)
            {
                float tmp_f = dequantize_qasymm8(in, qi_in);
                tmp_f       = tmp_f * ((std::min(std::max((tmp_f + 3), 0.0f), 6.0f)) * 0.166666667f);
                tmp         = quantize_qasymm8(tmp_f, qi_out);
            }
            else if(act == ActivationLayerInfo::ActivationFunction::LEAKY_RELU)
            {
                float tmp_f = dequantize_qasymm8(in, qi_in);
                tmp_f       = tmp_f > 0 ? tmp_f : tmp_f * a_f32;
                tmp         = quantize_qasymm8(tmp_f, qi_out);
            }
            else
            {
                ARM_COMPUTE_ERROR("Unsupported activation function");
            }
            *(output_ptr + x) = tmp;
        }
    },
    input, output);
}
} // namespace cpu
} // namespace arm_compute
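For context, this kernel is not normally called directly: QASYMM8 activations are typically driven through the public NEActivationLayer function, which dispatches to this implementation for QASYMM8 tensors. The sketch below is illustrative only; the tensor shape, the quantization scale/offset and the BOUNDED_RELU choice are made-up example values, not taken from this file.

#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/NEON/functions/NEActivationLayer.h"
#include "arm_compute/runtime/Tensor.h"

using namespace arm_compute;

int main()
{
    // Illustrative QASYMM8 tensors; shape, scale and offset are arbitrary example values
    Tensor           src, dst;
    const TensorInfo info(TensorShape(32U, 8U), 1, DataType::QASYMM8, QuantizationInfo(0.0235f, 128));
    src.allocator()->init(info);
    dst.allocator()->init(info);
    src.allocator()->allocate();
    dst.allocator()->allocate();

    // Bounded ReLU clamped at 6: one of the branches handled by the kernel above
    NEActivationLayer act;
    act.configure(&src, &dst, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 6.f));
    act.run();
    return 0;
}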