/** Compute the mean and the variance from accumulated sums.
 *
 * @param[in] sum       Accumulated sum of the input values.
 * @param[in] sum_sq    Accumulated sum of the squared input values.
 * @param[in] num_input Number of accumulated values. NOTE(review): must be
 *                      non-zero — both divisions below would otherwise divide
 *                      by zero; confirm callers guarantee this.
 *
 * @return Pair of {mean, variance} in fixed-point form: the mean is scaled by
 *         2^10 (the `* 1024`), and the variance maths scale by 2^20 (0x100000).
 */
inline std::pair<int64_t, int64_t> compute_mean_variance(int64_t sum, int64_t sum_sq, uint32_t num_input)
{
    // Fixed-point reciprocal of the element count: 2^20 / N.
    const auto temp = static_cast<int64_t>(0x100000) / num_input;
    // Mean scaled into Q.10 fixed point: (sum * 2^10) / N.
    const auto mean = sum * 1024 / static_cast<int64_t>(num_input);
    // Var(X) = E[X^2] - E[X]^2, computed in fixed point then rescaled by 2^20.
    const int64_t variance = ((sum_sq * temp) - (mean * mean)) / 0x100000;

    return std::make_pair(mean, variance);
}
// NOTE(review): garbled source extraction — the original file's line numbers
// (56, 58, 64, ...) are fused into the code text, and several statements are
// missing from this span: the function's opening brace, the definitions of
// a_low/a_high/b_low/b_high (original lines 60-63), any use of `bias`, and
// the return statement (original lines ~75 onward). Recover the complete
// function from the upstream source before building.
//
// Purpose (from what is visible): widen the four 32-bit lanes of `a` and `b`
// to int64, multiply them lane-wise into two int64x2_t halves — presumably
// the missing tail adds the widened `bias` and returns both halves as an
// int64x2x2_t; TODO confirm against the complete source.
56 inline int64x2x2_t mul_add(
const int32x4_t &a,
const int32x4_t &
b,
const int32x4_t &bias)
58 using namespace wrapper;
// Extract the four lanes of `a`. a_low/a_high are declared in the missing
// lines — presumably vgetlow(a)/vgethigh(a); verify upstream.
64 const int64_t a_0 =
vgetlane(a_low, 0);
65 const int64_t a_1 =
vgetlane(a_low, 1);
66 const int64_t a_2 =
vgetlane(a_high, 0);
67 const int64_t a_3 =
vgetlane(a_high, 1);
// Extract the four lanes of `b` the same way (b_low/b_high also come from
// the missing lines).
69 const int64_t b_0 =
vgetlane(b_low, 0);
70 const int64_t b_1 =
vgetlane(b_low, 1);
71 const int64_t b_2 =
vgetlane(b_high, 0);
72 const int64_t b_3 =
vgetlane(b_high, 1);
// Lane-wise 64-bit products: low half holds lanes 0,1; high half lanes 2,3.
75 const int64x2_t result_0{ a_0 * b_0, a_1 * b_1 };
76 const int64x2_t result_1{ a_2 * b_2, a_3 * b_3 };
90 static const std::map<DataType, ComputeFuncType> fn_map =
92 {
DataType::QSYMM16, std::mem_fn(&NEQLSTMLayerNormalizationKernel::compute_qsymm16) },
99 _fn = fn_map.at(_input->info()->data_type());
102 _output->info()->set_quantization_info(compute_output_qinfo());
110 _output_multiplier = 0;
114 Window win = configure_window(output);
115 INEKernel::configure(win);
118 Window NEQLSTMLayerNormalizationKernel::configure_window(
ITensor *target)
125 _window_start_x =
static_cast<int32_t
>(window.
x().
start());
126 _window_end_x =
static_cast<int32_t
>(window.
x().
end());
127 _window_step_x =
static_cast<int32_t
>(vector_size_byte) / _output->info()->element_size();
130 _inout_window = window;
134 _weight_window = _inout_window;
176 inline QuantizationInfo NEQLSTMLayerNormalizationKernel::compute_output_qinfo()
// NOTE(review): garbled source extraction — original line numbers are fused
// into the text, and several statements are missing: the opening brace, the
// declarations of the accumulators `sum` and `sum_sq` (presumably AccType,
// zero-initialized), the val_low/val_high halves of `val` (original lines
// ~196-198), the `#else` (non-aarch64) scalar reduction of pair_sum /
// pair_sum_sq into sum/sum_sq (original lines ~210-220), and closing braces.
// Recover the complete function from the upstream source before building.
//
// Purpose (from what is visible): walk one row of QSYMM16 input and return
// {sum of elements, sum of squared elements} as 64-bit accumulators; the
// vectorized loop handles full 8-lane steps, the scalar tail the remainder.
181 inline std::pair<int64_t, int64_t> NEQLSTMLayerNormalizationKernel::sum_qsymm16(
const int16_t *input_ptr)
185 using AccType = int64_t;
186 using InputDataType = int16_t;
// Vectorized main loop over [_window_start_x, _window_end_x) in steps of
// _window_step_x lanes.
191 int32_t x = _window_start_x;
192 for(; x <= _window_end_x && _window_step_x <= (_window_end_x - x); x += _window_step_x)
194 using namespace wrapper;
195 const int16x8_t val =
vloadq(input_ptr + x);
// aarch64 path: vaddv reduces a whole vector to one scalar per call.
// (val_low/val_high are declared in the missing lines.)
199 #if defined(__aarch64__) 200 sum +=
static_cast<AccType
>(vaddv(val_low));
201 sum +=
static_cast<AccType
>(vaddv(val_high));
203 sum_sq +=
static_cast<AccType
>(vaddv(
vmul(val_low, val_low)));
204 sum_sq +=
static_cast<AccType
>(vaddv(
vmul(val_high, val_high)));
// Non-aarch64 path: pairwise-widening adds (vpaddl) build 64-bit partial
// sums; the statements folding pair_sum / pair_sum_sq into sum / sum_sq
// are among the missing lines.
207 const int64x2_t pair_sum_low =
vpaddl(val_low);
208 const int64x2_t pair_sum_high =
vpaddl(val_high);
209 const int64x2_t pair_sum =
vadd(pair_sum_low, pair_sum_high);
212 const int32x4_t square_low =
vmul(val_low, val_low);
213 const int32x4_t square_high =
vmul(val_high, val_high);
214 const int64x2_t pair_sum_sq_low =
vpaddl(square_low);
215 const int64x2_t pair_sum_sq_high =
vpaddl(square_high);
216 const int64x2_t pair_sum_sq =
vadd(pair_sum_sq_low, pair_sum_sq_high);
// Scalar tail loop for the leftover (< _window_step_x) elements.
218 #endif // __aarch64__ 221 for(; x < _window_end_x; ++x)
223 const InputDataType val = input_ptr[x];
224 sum +=
static_cast<AccType
>(val);
225 sum_sq +=
static_cast<AccType
>(val * val);
228 return std::make_pair(sum, sum_sq);
// NOTE(review): garbled source extraction — original line numbers are fused
// into the text, and several statements are missing: the `output_ptr`
// parameter (the call site at compute_qsymm16 passes an out_ptr right after
// in_ptr, original line ~232), the opening brace, the computation of
// `rescaled` (used by both loops), `weight_high`, the vstore of `combined`,
// the declaration/initialization of `out_val` and the upper clamp, plus
// closing braces. Recover the complete function from upstream first.
//
// Purpose (from what is visible): per-element layer normalization of QSYMM16
// data — subtract the (Q.10) mean, scale by the inverse-stddev multiplier,
// apply weight and bias, round back down by 2^10 (the `>> 10` with +512
// rounding / vrshrq_n_s64(..., 10)), clamp to int16 and store.
231 inline void NEQLSTMLayerNormalizationKernel::normalize_qasymm16(
const int16_t *input_ptr,
233 const int16_t *weight_ptr,
234 const int32_t *bias_ptr,
235 int32_t mean, int32_t inv_std_mul, int32_t inv_std_shift)
237 using OutputDataType = int16_t;
239 using namespace wrapper;
// Vectorized main loop: 8 int16 lanes per step.
242 int32_t x = _window_start_x;
243 for(; x <= _window_end_x && _window_step_x <= (_window_end_x - x); x += _window_step_x)
245 const int16x8_t val =
vloadq(input_ptr + x);
// Widen the weights; weight_high (upper 4 lanes) comes from missing lines.
252 const int16x8_t weight_val =
vloadq(weight_ptr + x);
253 const int32x4_t weight_low =
vmovl(
vgetlow(weight_val));
// Bias is already 32-bit: load the two 4-lane halves directly.
256 const int32x4_t bias_low =
vloadq(bias_ptr + x);
257 const int32x4_t bias_high =
vloadq(bias_ptr + 4 + x);
// 64-bit weighted accumulation (see mul_add); `rescaled` is produced by the
// missing normalization statements above.
259 int64x2x2_t result_0 = mul_add(rescaled.val[0], weight_low, bias_low);
260 int64x2x2_t result_1 = mul_add(rescaled.val[1], weight_high, bias_high);
// Rounding shift right by 10 undoes the Q.10 scaling; narrow back to int32.
262 int32x4x2_t combined;
263 combined.val[0] =
vcombine(
vmovn(vrshrq_n_s64(result_0.val[0], 10)),
vmovn(vrshrq_n_s64(result_0.val[1], 10)));
264 combined.val[1] =
vcombine(
vmovn(vrshrq_n_s64(result_1.val[0], 10)),
vmovn(vrshrq_n_s64(result_1.val[1], 10)));
// Scalar tail loop mirroring the vector path.
272 for(; x < _window_end_x; ++x)
274 const auto val =
static_cast<int32_t
>(input_ptr[x]);
// Scale up by 2^10 and centre on the mean (mean is already Q.10).
275 const int32_t shifted = (val << 10) - mean;
277 const int64_t weighted = rescaled * weight_ptr[x] + bias_ptr[x];
// +512 implements round-to-nearest before the >> 10 rescale.
278 const auto reverse_shifted =
static_cast<int32_t
>((weighted + 512) >> 10);
// Clamp into the int16 output range (the max-side clamp is in missing lines).
280 out_val = utility::clamp<decltype(out_val), OutputDataType>(out_val, std::numeric_limits<OutputDataType>::min());
281 output_ptr[x] =
static_cast<OutputDataType
>(out_val);
// NOTE(review): garbled source extraction — original line numbers are fused
// into the text, and several statements are missing: the opening brace, the
// execute_window_loop(...) call that opens the per-row lambda (its trailing
// `input_iterator, output_iterator);` survives at the bottom), the
// declarations of sum / sum_sq / mean, and — given the stddev_invsqrt_mul /
// stddev_invsqrt_shift outputs — presumably a call to
// get_invsqrt_quantized_multiplier_exp(variance, ...) before the normalize
// call; TODO confirm against the complete source.
//
// Purpose (from what is visible): per row of the QSYMM16 input, accumulate
// sum / sum-of-squares, derive fixed-point mean and variance, derive the
// inverse-stddev quantized multiplier, and write the normalized row out.
285 void NEQLSTMLayerNormalizationKernel::compute_qsymm16()
287 using InputDataType = int16_t;
288 using OutputDataType = int16_t;
289 using BiasDataType = int32_t;
290 using AccType = int64_t;
// Input/output iterate over the full in/out window; weight and bias use the
// (1-D) weight window.
292 Iterator input_iterator{ _input, _inout_window };
293 Iterator output_iterator{ _output, _inout_window };
294 Iterator weight_iterator{ _weight, _weight_window };
295 Iterator bias_iterator{ _bias, _weight_window };
297 const auto weight_ptr =
reinterpret_cast<const InputDataType *
>(weight_iterator.ptr());
298 const auto bias_ptr =
reinterpret_cast<const BiasDataType *
>(bias_iterator.ptr());
// Normalization statistics are computed per row of length tensor_shape()[0].
300 const uint32_t column_size = _input->info()->tensor_shape()[0];
304 const auto in_ptr =
reinterpret_cast<const InputDataType *
>(input_iterator.ptr());
305 auto out_ptr =
reinterpret_cast<OutputDataType *
>(output_iterator.ptr());
// First pass: raw sums over the row (sum/sum_sq declared in missing lines).
309 std::tie(sum, sum_sq) = sum_qsymm16(in_ptr);
312 AccType variance{ 0 };
313 std::tie(mean, variance) = compute_mean_variance(sum, sum_sq, column_size);
315 int32_t stddev_invsqrt_mul{};
316 int32_t stddev_invsqrt_shift{};
// Second pass: normalize the row in place into the output buffer.
319 normalize_qasymm16(in_ptr, out_ptr, weight_ptr, bias_ptr, mean, stddev_invsqrt_mul, stddev_invsqrt_shift);
321 input_iterator, output_iterator);
virtual size_t num_dimensions() const =0
The number of dimensions of the tensor (rank)
Window calculate_max_window(const ValidRegion &valid_region, const Steps &steps, bool skip_border, BorderSize border_size)
static Status validate(const ITensorInfo *input, const ITensorInfo *output, const ITensorInfo *weight, const ITensorInfo *bias)
Static function to check if given info will lead to a valid configuration of NEQLSTMLayerNormalizationKernel.
const Window & window() const
The maximum window the kernel can be executed on.
quantized, symmetric fixed-point 16-bit number
uint32x2_t vmovn(const uint64x2_t &a)
uint8x16_t vloadq(const uint8_t *ptr)
DATA_TYPE sum(__global const DATA_TYPE *input)
Calculate sum of a vector.
uint8x8_t vadd(const uint8x8_t &a, const uint8x8_t &b)
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
Store the tensor's metadata.
#define ARM_COMPUTE_ERROR_THROW_ON(status)
uint8x8_t vsub(const uint8x8_t &a, const uint8x8_t &b)
Describe one of the image's dimensions with a start, end and step.
Status calculate_quantized_multiplier(float multiplier, int32_t *quant_multiplier, int32_t *shift, bool ignore_epsilon=false)
Calculate quantized representation of multiplier.
#define ARM_COMPUTE_RETURN_ERROR_ON(cond)
If the condition is true, an error is returned.
Interface for Neon tensor.
Copyright (c) 2017-2021 Arm Limited.
virtual void set_valid_region(const ValidRegion &valid_region)=0
Set the valid region of the tensor.
1 channel, 1 S32 per channel
T x() const
Alias to access the size of the first dimension.
uint32x2_t vqmovn(const uint64x2_t &a)
Quantization information.
uint8_t vgetlane(const uint8x8_t vector, const unsigned int lane)
static constexpr size_t DimX
Alias for dimension 0 also known as X dimension.
#define ARM_COMPUTE_UNUSED(...)
To avoid unused variables warnings.
int32x4x2_t multiply_by_quantized_multiplier_2row(int32x4x2_t input, int32_t qmul, int32_t shift)
Multiply a neon vector using quantized multiplier and shift.
void get_invsqrt_quantized_multiplier_exp(int32_t input, int32_t reverse_shift, int32_t &output_inv_sqrt, int32_t &output_shift)
Compute quantized multiplier and shift for the inverse square root of input.
virtual const TensorShape & tensor_shape() const =0
Size for each dimension of the tensor.
int32_t multiply_by_quantized_multiplier(int32_t input, int32_t qmul, int32_t shift)
Compute the value multiplied by given quantized multiplier and shift.
Class to describe a number of elements in each dimension.
#define ARM_COMPUTE_ERROR_ON_MSG(cond, msg)
bool auto_init_if_empty(ITensorInfo &info, const TensorShape &shape, int num_channels, DataType data_type, QuantizationInfo quantization_info=QuantizationInfo())
Auto initialize the tensor info (shape, number of channels and data type) if the current assignment is empty.
virtual ITensorInfo * info() const =0
Interface to be implemented by the child class to return the tensor's metadata.
uint8x8_t vgetlow(const uint8x16_t val)
void set(size_t dimension, const Dimension &dim)
Set the values of a given dimension.
uint8x16_t vcombine(const uint8x8_t &a, const uint8x8_t &b)
#define ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(k)
uint8x8_t vgethigh(const uint8x16_t val)
static constexpr size_t DimY
Alias for dimension 1 also known as Y dimension.
ScaleKernelInfo info(interpolation_policy, default_border_mode, PixelValue(), sampling_policy, false)
uint8x8_t vmul(const uint8x8_t &a, const uint8x8_t &b)
Information about executing thread and CPU.
virtual size_t total_size() const =0
Returns the total size of the tensor in bytes.
#define ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(...)
void configure(const ITensor *input, ITensor *output, const ITensor *weight, const ITensor *bias)
Set the input and output tensors.
#define ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(...)
#define ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(t, c,...)
void vstore(uint8_t *ptr, uint8x8_t val)
#define ARM_COMPUTE_ERROR_ON_NULLPTR(...)
uint8x8_t vdup_n(uint8_t value, traits::vector_64_tag)
void execute_window_loop(const Window &w, L &&lambda_function, Ts &&... iterators)
Iterate through the passed window, automatically adjusting the iterators and calling the lambda_function for each element.
uint16x4_t vpaddl(const uint8x8_t &a)
void run(const Window &window, const ThreadInfo &info) override
Execute the kernel on the passed window.
void set_num_dimensions(size_t num_dimensions)
Set number of dimensions.
Container for valid region of a window.
constexpr int end() const
Return the end of the dimension.
Iterator updated by execute_window_loop for each window element.
uint16x8_t vmovl(const uint8x8_t &a)
constexpr int start() const
Return the start of the dimension.
Describe a multidimensional execution window.
#define ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(f, s)
Status validate(const ITensorInfo *scores_in, const ITensorInfo *boxes_in, const ITensorInfo *batch_splits_in, const ITensorInfo *scores_out, const ITensorInfo *boxes_out, const ITensorInfo *classes, const ITensorInfo *batch_splits_out, const ITensorInfo *keeps, const ITensorInfo *keeps_size, const BoxNMSLimitInfo info)
constexpr const Dimension & x() const
Alias to access the first dimension of the window.