Compute Library
 21.02
NEQLSTMLayerNormalizationKernel.cpp
/*
 * Copyright (c) 2020 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "src/core/NEON/kernels/NEQLSTMLayerNormalizationKernel.h"

#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/Utils.h"
#include "arm_compute/core/Validate.h"
#include "arm_compute/core/Window.h"
#include "arm_compute/core/utils/quantization/AsymmHelpers.h"
#include "src/core/CPP/Validate.h"
#include "src/core/NEON/NEMath.h"
#include "src/core/NEON/NESymm.h"
#include "src/core/NEON/wrapper/wrapper.h"
#include "src/core/helpers/AutoConfiguration.h"
#include "src/core/helpers/WindowHelpers.h"

#include <map>

namespace arm_compute
{
namespace
{
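// compute_mean_variance(): the mean is returned with 10 fractional bits (x1024),
// matching the "input << 10" representation used later in normalize_qasymm16().
// The variance is evaluated as E[x^2] - E[x]^2 at 2^20 scale (0x100000) and then
// shifted back, so it is returned in plain, unscaled input units.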
inline std::pair<int64_t, int64_t> compute_mean_variance(int64_t sum, int64_t sum_sq, uint32_t num_input)
{
    const auto    temp     = static_cast<int64_t>(0x100000) / num_input;
    const auto    mean     = sum * 1024 / static_cast<int64_t>(num_input);
    const int64_t variance = ((sum_sq * temp) - (mean * mean)) / 0x100000;

    return std::make_pair(mean, variance);
}

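// mul_add(): per-lane multiply-accumulate with 64-bit intermediates. The rescaled
// values and the widened weights are multiplied as int64_t scalars because a
// 32-bit x 32-bit product can overflow int32; the widened bias is then added.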
inline int64x2x2_t mul_add(const int32x4_t &a, const int32x4_t &b, const int32x4_t &bias)
{
    using namespace wrapper;
    const int64x2_t a_low  = vmovl(vgetlow(a));
    const int64x2_t a_high = vmovl(vgethigh(a));
    const int64x2_t b_low  = vmovl(vgetlow(b));
    const int64x2_t b_high = vmovl(vgethigh(b));

    const int64_t a_0 = vgetlane(a_low, 0);
    const int64_t a_1 = vgetlane(a_low, 1);
    const int64_t a_2 = vgetlane(a_high, 0);
    const int64_t a_3 = vgetlane(a_high, 1);

    const int64_t b_0 = vgetlane(b_low, 0);
    const int64_t b_1 = vgetlane(b_low, 1);
    const int64_t b_2 = vgetlane(b_high, 0);
    const int64_t b_3 = vgetlane(b_high, 1);

    int64x2x2_t     result;
    const int64x2_t result_0{ a_0 * b_0, a_1 * b_1 };
    const int64x2_t result_1{ a_2 * b_2, a_3 * b_3 };
    result.val[0] = vadd(vmovl(vgetlow(bias)), result_0);
    result.val[1] = vadd(vmovl(vgethigh(bias)), result_1);

    return result;
}
} // namespace

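// Typical usage of this kernel (illustrative sketch only; the tensor names are
// placeholders and tensor allocation/filling is omitted):
//
//   NEQLSTMLayerNormalizationKernel norm_kernel;
//   // input/output/weight are QSYMM16 tensors, bias is an S32 tensor
//   norm_kernel.configure(&input, &output, &weight, &bias);
//   NEScheduler::get().schedule(&norm_kernel, Window::DimY);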
void NEQLSTMLayerNormalizationKernel::configure(const ITensor *input, ITensor *output, const ITensor *weight, const ITensor *bias)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(input, weight, bias, output);
    ARM_COMPUTE_ERROR_ON(input == output);
    ARM_COMPUTE_ERROR_THROW_ON(validate(input->info(), output->info(), weight->info(), bias->info()));

    static const std::map<DataType, ComputeFuncType> fn_map =
    {
        { DataType::QSYMM16, std::mem_fn(&NEQLSTMLayerNormalizationKernel::compute_qsymm16) },
    };

    _input  = input;
    _output = output;
    _weight = weight;
    _bias   = bias;
    _fn     = fn_map.at(_input->info()->data_type());

    auto_init_if_empty(*_output->info(), *_input->info());
    _output->info()->set_quantization_info(compute_output_qinfo());

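    // Fold the weight scale into a fixed-point multiplier and shift. They are used in
    // normalize_qasymm16() for the final rescale to the fixed 1/4096 output scale
    // (hence the "+ 12" added to the shift there).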
    const UniformQuantizationInfo wq_info = _weight->info()->quantization_info().uniform();
    const Status                  s       = quantization::calculate_quantized_multiplier(wq_info.scale, &_output_multiplier, &_output_shift);
    _output_shift *= -1;

    if(!bool(s))
    {
        _output_multiplier = 0;
        _output_shift      = 0;
    }

    Window win = configure_window(output);
    INEKernel::configure(win);
}

Window NEQLSTMLayerNormalizationKernel::configure_window(ITensor *target)
{
    Window      window = calculate_max_window(*target->info(), Steps());
    Coordinates coord;
    coord.set_num_dimensions(target->info()->num_dimensions());
    target->info()->set_valid_region(ValidRegion(coord, target->info()->tensor_shape()));

    _window_start_x = static_cast<int32_t>(window.x().start());
    _window_end_x   = static_cast<int32_t>(window.x().end());
    _window_step_x  = static_cast<int32_t>(vector_size_byte) / _output->info()->element_size();

    // The input and output windows iterate over the y-axis; the x-axis is traversed
    // manually inside the compute function using _window_start_x/_window_end_x/_window_step_x.
    _inout_window = window;
    _inout_window.set(Window::DimX, Window::Dimension(0, 1, 1));

    // The weight and bias windows cannot iterate along the y-axis since those tensors are 1D.
    _weight_window = _inout_window;
    _weight_window.set(Window::DimY, Window::Dimension(0, 1, 1));

    return window;
}

Status NEQLSTMLayerNormalizationKernel::validate(const ITensorInfo *input, const ITensorInfo *output, const ITensorInfo *weight, const ITensorInfo *bias)
{
    ARM_COMPUTE_UNUSED(output, bias, weight, input);

    ARM_COMPUTE_ERROR_ON_NULLPTR(input, weight, bias, output);

    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QSYMM16);
    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, weight);
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(bias, 1, DataType::S32);

    ARM_COMPUTE_RETURN_ERROR_ON(input->num_dimensions() > max_input_dimension);
    ARM_COMPUTE_RETURN_ERROR_ON(weight->num_dimensions() > max_weight_dimension);
    ARM_COMPUTE_RETURN_ERROR_ON(bias->num_dimensions() > max_bias_dimension);

    ARM_COMPUTE_RETURN_ERROR_ON(input->tensor_shape().x() != weight->tensor_shape().x());
    ARM_COMPUTE_RETURN_ERROR_ON(weight->tensor_shape().x() != bias->tensor_shape().x());

    if(output->total_size() != 0)
    {
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input, output);
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
    }

    return Status{};
}

void NEQLSTMLayerNormalizationKernel::run(const Window &window, const ThreadInfo &info)
{
    ARM_COMPUTE_UNUSED(window, info);
    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(INEKernel::window(), window);
    ARM_COMPUTE_ERROR_ON_MSG(!_fn, "internal function is not defined for computation");

    _fn(*this);
}
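// The output of QLSTM layer normalization always uses a fixed quantization scale
// of 1/4096 (2^-12), independent of the input scale.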
inline QuantizationInfo NEQLSTMLayerNormalizationKernel::compute_output_qinfo()
{
    return QuantizationInfo(1.f / 4096);
}

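// sum_qsymm16(): accumulates the sum and sum of squares of one row, vectorised on
// the x-axis with a scalar tail loop. 64-bit accumulators are used because the sum
// of squares of a long row does not fit in int32.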
inline std::pair<int64_t, int64_t> NEQLSTMLayerNormalizationKernel::sum_qsymm16(const int16_t *input_ptr)
{
    ARM_COMPUTE_ERROR_ON(!input_ptr);

    using AccType       = int64_t;
    using InputDataType = int16_t;

    AccType sum{ 0 };
    AccType sum_sq{ 0 };

    int32_t x = _window_start_x;
    for(; x <= _window_end_x && _window_step_x <= (_window_end_x - x); x += _window_step_x)
    {
        using namespace wrapper;
        const int16x8_t val      = vloadq(input_ptr + x);
        const int32x4_t val_low  = vmovl(vgetlow(val));
        const int32x4_t val_high = vmovl(vgethigh(val));

#if defined(__aarch64__)
        sum += static_cast<AccType>(vaddv(val_low));
        sum += static_cast<AccType>(vaddv(val_high));

        sum_sq += static_cast<AccType>(vaddv(vmul(val_low, val_low)));
        sum_sq += static_cast<AccType>(vaddv(vmul(val_high, val_high)));
#else  // __aarch64__
        // only AArch64 supports vaddv
        const int64x2_t pair_sum_low  = vpaddl(val_low);
        const int64x2_t pair_sum_high = vpaddl(val_high);
        const int64x2_t pair_sum      = vadd(pair_sum_low, pair_sum_high);
        sum += vgetlane(pair_sum, 0) + vgetlane(pair_sum, 1);

        const int32x4_t square_low       = vmul(val_low, val_low);
        const int32x4_t square_high      = vmul(val_high, val_high);
        const int64x2_t pair_sum_sq_low  = vpaddl(square_low);
        const int64x2_t pair_sum_sq_high = vpaddl(square_high);
        const int64x2_t pair_sum_sq      = vadd(pair_sum_sq_low, pair_sum_sq_high);
        sum_sq += vgetlane(pair_sum_sq, 0) + vgetlane(pair_sum_sq, 1);
#endif // __aarch64__
    }

    for(; x < _window_end_x; ++x)
    {
        const InputDataType val = input_ptr[x];
        sum += static_cast<AccType>(val);
        sum_sq += static_cast<AccType>(val * val);
    }

    return std::make_pair(sum, sum_sq);
}

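// normalize_qasymm16(): for each element, compute (x << 10) - mean, rescale by the
// inverse standard deviation (inv_std_mul / inv_std_shift), multiply by the weight,
// add the bias, round back down by 10 bits and rescale to the 2^-12 output scale
// before saturating to int16.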
inline void NEQLSTMLayerNormalizationKernel::normalize_qasymm16(const int16_t *input_ptr,
                                                                int16_t       *output_ptr,
                                                                const int16_t *weight_ptr,
                                                                const int32_t *bias_ptr,
                                                                int32_t mean, int32_t inv_std_mul, int32_t inv_std_shift)
{
    using OutputDataType = int16_t;

    using namespace wrapper;
    const int32x4_t mean_vec = vdup_n(mean, wrapper::traits::vector_128_tag{});

    int32_t x = _window_start_x;
    for(; x <= _window_end_x && _window_step_x <= (_window_end_x - x); x += _window_step_x)
    {
        const int16x8_t val = vloadq(input_ptr + x);
        int32x4x2_t     shifted;
        shifted.val[0] = vsub(vshlq_n_s32(vmovl(vgetlow(val)), 10), mean_vec);
        shifted.val[1] = vsub(vshlq_n_s32(vmovl(vgethigh(val)), 10), mean_vec);

        int32x4x2_t rescaled = multiply_by_quantized_multiplier_2row(shifted, inv_std_mul, inv_std_shift);

        const int16x8_t weight_val  = vloadq(weight_ptr + x);
        const int32x4_t weight_low  = vmovl(vgetlow(weight_val));
        const int32x4_t weight_high = vmovl(vgethigh(weight_val));

        const int32x4_t bias_low  = vloadq(bias_ptr + x);
        const int32x4_t bias_high = vloadq(bias_ptr + 4 + x);

        int64x2x2_t result_0 = mul_add(rescaled.val[0], weight_low, bias_low);
        int64x2x2_t result_1 = mul_add(rescaled.val[1], weight_high, bias_high);

        int32x4x2_t combined;
        combined.val[0] = vcombine(vmovn(vrshrq_n_s64(result_0.val[0], 10)), vmovn(vrshrq_n_s64(result_0.val[1], 10)));
        combined.val[1] = vcombine(vmovn(vrshrq_n_s64(result_1.val[0], 10)), vmovn(vrshrq_n_s64(result_1.val[1], 10)));

        int32x4x2_t out_val = multiply_by_quantized_multiplier_2row(combined, _output_multiplier, _output_shift + 12);

        vstore(output_ptr + x, vqmovn(out_val.val[0]));
        vstore(output_ptr + x + 4, vqmovn(out_val.val[1]));
    }

    for(; x < _window_end_x; ++x)
    {
        const auto    val             = static_cast<int32_t>(input_ptr[x]);
        const int32_t shifted         = (val << 10) - mean;
        const int32_t rescaled        = quantization::multiply_by_quantized_multiplier(shifted, inv_std_mul, inv_std_shift);
        const int64_t weighted        = rescaled * weight_ptr[x] + bias_ptr[x];
        const auto    reverse_shifted = static_cast<int32_t>((weighted + 512) >> 10);
        int32_t       out_val         = quantization::multiply_by_quantized_multiplier(reverse_shifted, _output_multiplier, _output_shift + 12);
        out_val                       = utility::clamp<decltype(out_val), OutputDataType>(out_val, std::numeric_limits<OutputDataType>::min());
        output_ptr[x]                 = static_cast<OutputDataType>(out_val);
    }
}

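// compute_qsymm16(): the execution window has its x dimension collapsed, so each
// execute_window_loop() iteration processes one complete row: statistics first
// (sum_qsymm16), then an inverse-sqrt multiplier for the standard deviation, then
// normalisation (normalize_qasymm16). The 1D weight and bias are reused for every row.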
void NEQLSTMLayerNormalizationKernel::compute_qsymm16()
{
    using InputDataType  = int16_t;
    using OutputDataType = int16_t;
    using BiasDataType   = int32_t;
    using AccType        = int64_t;

    Iterator input_iterator{ _input, _inout_window };
    Iterator output_iterator{ _output, _inout_window };
    Iterator weight_iterator{ _weight, _weight_window };
    Iterator bias_iterator{ _bias, _weight_window };

    const auto weight_ptr = reinterpret_cast<const InputDataType *>(weight_iterator.ptr());
    const auto bias_ptr   = reinterpret_cast<const BiasDataType *>(bias_iterator.ptr());

    const uint32_t column_size = _input->info()->tensor_shape()[0];

    execute_window_loop(_inout_window, [&, this](const Coordinates &)
    {
        const auto in_ptr  = reinterpret_cast<const InputDataType *>(input_iterator.ptr());
        auto       out_ptr = reinterpret_cast<OutputDataType *>(output_iterator.ptr());

        AccType sum{ 0 };
        AccType sum_sq{ 0 };
        std::tie(sum, sum_sq) = sum_qsymm16(in_ptr);

        AccType mean{ 0 };
        AccType variance{ 0 };
        std::tie(mean, variance) = compute_mean_variance(sum, sum_sq, column_size);

        int32_t stddev_invsqrt_mul{};
        int32_t stddev_invsqrt_shift{};
        quantization::get_invsqrt_quantized_multiplier_exp(static_cast<int32_t>(variance), -1, stddev_invsqrt_mul, stddev_invsqrt_shift);

        normalize_qasymm16(in_ptr, out_ptr, weight_ptr, bias_ptr, mean, stddev_invsqrt_mul, stddev_invsqrt_shift);
    },
    input_iterator, output_iterator);
}
} // namespace arm_compute