// Fragment of an argument-validation routine whose enclosing definition is
// not visible in this extract.  Presumably output-side checks only apply once
// the output tensor has been configured (non-empty shape) — TODO confirm
// against the full source.
50 if(output->tensor_shape().total_size() > 0)
// NOTE(review): this file is a doxygen source-view scrape — original line
// numbers are fused into the text and interior lines are missing.  Only
// comments are added here; no tokens were changed.
//
// Computes the kernel's execution window and sets the output's valid region
// to cover the whole output shape; always returns {Status{}, win}.
// The line defining `win` (used below) is missing from this extract —
// presumably a calculate_max_window() call; confirm against the full source.
60 std::tuple<Status, Window> validate_and_configure_window(ITensorInfo *input, ITensorInfo *output)
// Valid region anchored at the origin with the output's dimensionality.
70 coord.set_num_dimensions(output->num_dimensions());
71 output->set_valid_region(ValidRegion(coord, output->tensor_shape()));
// No failure path visible here: success status is paired with the window.
73 return std::make_tuple(Status{}, win);
// Primary template: stores a dequantized 16-float block (float32x4x4_t) to
// `ptr`.  Its generic body is not visible in this extract; the float and
// float16_t specialisations below are the ones actually used.
77 inline void store_result(T *ptr,
const float32x4x4_t &v)
// Specialisation for float output.  Body not visible in this extract —
// presumably four wrapper::vstore calls at ptr, ptr+4, ptr+8, ptr+12;
// confirm against the full source.
83 inline void store_result<float>(
float *ptr,
const float32x4x4_t &v)
// Specialisation for float16_t output, compiled only when FP16 vector
// arithmetic is available: narrows the four f32 quads to f16 pairs and
// stores all 16 halves with two vector stores (at ptr and ptr + 8).
// NOTE(review): the extraction fused the #ifdef line and the declaration
// (original lines 91 and 93) into one line.
91 #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC 93 inline void store_result<float16_t>(float16_t *ptr,
const float32x4x4_t &v)
95 wrapper::vstore(ptr, vcombine_f16(vcvt_f16_f32(v.val[0]), vcvt_f16_f32(v.val[1])));
96 wrapper::vstore(ptr + 8, vcombine_f16(vcvt_f16_f32(v.val[2]), vcvt_f16_f32(v.val[3])));
// Primary template for the 8-element (float32x4x2_t) store variant, used by
// the QSYMM16 path below; generic body not visible in this extract.
100 template <
typename T>
101 inline void store_result(T *ptr,
const float32x4x2_t &v)
// float specialisation of the 8-element store; body not visible in this
// extract (presumably two wrapper::vstore calls — confirm).
107 inline void store_result<float>(
float *ptr,
const float32x4x2_t &v)
// float16_t specialisation of the 8-element store (FP16-capable builds
// only): narrows both f32 quads to f16 and stores 8 halves in one go.
// NOTE(review): #ifdef and declaration (original lines 113/115) were fused
// by the extraction.
113 #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC 115 inline void store_result<float16_t>(float16_t *ptr,
const float32x4x2_t &v)
117 wrapper::vstore(ptr, vcombine_f16(vcvt_f16_f32(v.val[0]), vcvt_f16_f32(v.val[1])));
// Dequantizes an asymmetric-quantized 8-bit tensor (TIn = uint8_t or int8_t,
// per the dispatch in run_dequantization_core) into floating point TOut,
// using the layer-uniform scale and offset.
// NOTE(review): interior lines are missing from this extract — notably the
// vector load + vdequantize producing `vdeq`, the scalar-tail store, and all
// braces / the execute_window_loop lambda wrapper.
121 template <
typename TOut,
typename TIn>
122 void run_dequantization_qasymm8(
const ITensor *input, ITensor *output,
const Window &window)
// Per-layer (uniform) quantization parameters: one scale, one zero offset.
124 const UniformQuantizationInfo &
qinfo = input->info()->quantization_info().
uniform();
125 const float scale = qinfo.scale;
126 const int32_t
offset = qinfo.offset;
// 16 elements per vector iteration; the X range is iterated manually below.
128 const int window_step_x = 16;
129 const auto window_start_x =
static_cast<int>(window.x().start());
130 const auto window_end_x =
static_cast<int>(window.x().end());
// Collapse dimensions where possible and force a single X step so X can be
// processed with an explicit vector loop plus scalar tail.
133 Window win_collapsed = window.collapse_if_possible(window,
Window::DimZ);
134 win_collapsed.set(
Window::DimX, Window::Dimension(0, 1, 1));
137 Iterator in(input, win_collapsed);
138 Iterator out(output, win_collapsed);
// Raw typed pointers into the current window position.
142 const auto in_ptr =
reinterpret_cast<const TIn *
>(in.ptr());
143 const auto out_ptr =
reinterpret_cast<TOut *
>(out.ptr());
// Vectorized main loop, 16 values per step.  `vdeq` is produced on lines
// missing from this extract (presumably vloadq + vdequantize — confirm).
145 int x = window_start_x;
146 for(; x <= (window_end_x - window_step_x); x += window_step_x)
151 store_result(reinterpret_cast<TOut *>(out_ptr + x), vdeq);
// Scalar tail for the remaining (< 16) elements; the store that consumes
// `val` is missing from this extract.
155 for(; x < window_end_x; ++x)
157 auto val = *(in_ptr + x);
// Dequantizes a symmetric per-channel quantized 8-bit tensor in NCHW layout:
// each channel has its own scale and there is no offset.  The scalar tail
// indexes the scale table with the window's Z coordinate, i.e. the channel
// dimension in NCHW.
// NOTE(review): interior lines are missing from this extract — the window
// setup defining `win`, the vector-loop body producing `vdeq`, the
// execute_window_loop lambda (which supplies `id`), and braces.
164 template <
typename T>
165 void run_dequantization_qsymm8_per_channel_nchw(
const ITensor *input, ITensor *output,
const Window &window)
// Per-channel scale table.
167 const auto scale = input->info()->quantization_info().scale();
169 const int window_step_x = 16;
170 const auto window_start_x =
static_cast<int>(window.x().start());
171 const auto window_end_x =
static_cast<int>(window.x().end());
// `win` is defined on lines not visible in this extract.
178 Iterator in(input, win);
179 Iterator out(output, win);
183 const auto in_ptr =
reinterpret_cast<const int8_t *
>(in.ptr());
184 const auto out_ptr =
reinterpret_cast<T *
>(out.ptr());
// Vectorized main loop; `vdeq` computed on missing lines.
186 int x = window_start_x;
187 for(; x <= (window_end_x - window_step_x); x += window_step_x)
192 store_result<T>(
reinterpret_cast<T *
>(out_ptr + x), vdeq);
// Scalar tail: symmetric dequantize with this channel's (Z index) scale.
196 for(; x < window_end_x; ++x)
198 int8_t val = *(in_ptr + x);
199 *(out_ptr + x) = static_cast<T>(
dequantize(val, scale[
id.z()]));
// Dequantizes a symmetric per-channel quantized 8-bit tensor in NHWC layout:
// channels run along the X window dimension, so a fresh 16-wide scale vector
// (vscale) is gathered from scale[x .. x+15] on every vector iteration, and
// the scalar tail indexes scale[x] directly.
// NOTE(review): interior lines are missing from this extract — the window
// setup defining `win`, the braces around vscale's initializer, the load /
// dequantize producing `vdeq`, and the execute_window_loop wrapper.
205 template <
typename T>
206 void run_dequantization_qsymm8_per_channel_nhwc(
const ITensor *input, ITensor *output,
const Window &window)
// Per-channel scale table.
208 const auto scale = input->info()->quantization_info().scale();
210 const int window_step_x = 16;
211 const auto window_start_x =
static_cast<int>(window.x().start());
212 const auto window_end_x =
static_cast<int>(window.x().end());
// `win` is defined on lines not visible in this extract.
219 Iterator in(input, win);
220 Iterator out(output, win);
224 const auto in_ptr =
reinterpret_cast<const int8_t *
>(in.ptr());
225 const auto out_ptr =
reinterpret_cast<T *
>(out.ptr());
227 int x = window_start_x;
228 for(; x <= (window_end_x - window_step_x); x += window_step_x)
// Gather the 16 per-channel scales for this X position.
230 const float32x4x4_t vscale =
233 scale[x + 0], scale[x + 1], scale[x + 2], scale[x + 3],
234 scale[x + 4], scale[x + 5], scale[x + 6], scale[x + 7],
235 scale[x + 8], scale[x + 9], scale[x + 10], scale[x + 11],
236 scale[x + 12], scale[x + 13], scale[x + 14], scale[x + 15]
242 store_result<T>(
reinterpret_cast<T *
>(out_ptr + x), vdeq);
// Scalar tail: per-channel symmetric dequantize, channel index == x.
246 for(; x < window_end_x; ++x)
248 int8_t val = *(in_ptr + x);
249 *(out_ptr + x) = static_cast<T>(
dequantize(val, scale[x]));
// Dequantizes a symmetric (per-layer) quantized 8-bit tensor: a single
// uniform scale, no offset (qinfo.offset is not read here).
// NOTE(review): interior lines are missing from this extract — the vector
// load / dequantize producing `vdeq`, the execute_window_loop wrapper, and
// braces.
255 template <
typename T>
256 void run_dequantization_qsymm8(
const ITensor *input, ITensor *output,
const Window &window)
256 template <
258 const UniformQuantizationInfo &qinfo = input->info()->quantization_info().uniform();
259 const float scale = qinfo.scale;
261 const int window_step_x = 16;
262 const auto window_start_x =
static_cast<int>(window.x().start());
263 const auto window_end_x =
static_cast<int>(window.x().end());
// Collapse where possible and force a single X step for manual X loops.
266 Window win_collapsed = window.collapse_if_possible(window,
Window::DimZ);
267 win_collapsed.set(
Window::DimX, Window::Dimension(0, 1, 1));
270 Iterator in(input, win_collapsed);
271 Iterator out(output, win_collapsed);
275 const auto in_ptr =
reinterpret_cast<const int8_t *
>(in.ptr());
276 const auto out_ptr =
reinterpret_cast<T *
>(out.ptr());
// Vectorized main loop, 16 values per step.
278 int x = window_start_x;
279 for(; x <= (window_end_x - window_step_x); x += window_step_x)
284 store_result<T>(
reinterpret_cast<T *
>(out_ptr + x), vdeq);
// Scalar tail: symmetric dequantize with the uniform scale.
288 for(; x < window_end_x; ++x)
290 int8_t val = *(in_ptr + x);
291 *(out_ptr + x) = static_cast<T>(
dequantize(val, scale));
// Dequantizes a symmetric quantized 16-bit tensor: uniform scale, no offset.
// Uses a vector step of 8 (int16x8 lanes) rather than 16.
// NOTE(review): interior lines are missing from this extract — the vector
// load / vdequantize_int16 producing `vdeq`, the scalar-tail store
// (presumably dequantize_qsymm16 — confirm), the execute_window_loop
// wrapper, and braces.
297 template <
typename T>
298 void run_dequantization_qsymm16(
const ITensor *input, ITensor *output,
const Window &window)
300 const UniformQuantizationInfo &qinfo = input->info()->quantization_info().uniform();
301 const float scale = qinfo.scale;
// 8 elements per vector iteration for 16-bit input.
303 const int window_step_x = 8;
304 const auto window_start_x =
static_cast<int>(window.x().start());
305 const auto window_end_x =
static_cast<int>(window.x().end());
// Collapse where possible and force a single X step for manual X loops.
308 Window win_collapsed = window.collapse_if_possible(window,
Window::DimZ);
309 win_collapsed.set(
Window::DimX, Window::Dimension(0, 1, 1));
312 Iterator in(input, win_collapsed);
313 Iterator out(output, win_collapsed);
317 const auto in_ptr =
reinterpret_cast<const int16_t *
>(in.ptr());
318 const auto out_ptr =
reinterpret_cast<T *
>(out.ptr());
320 int x = window_start_x;
321 for(; x <= (window_end_x - window_step_x); x += window_step_x)
326 store_result<T>(
reinterpret_cast<T *
>(out_ptr + x), vdeq);
// Scalar tail; the store consuming `val` is missing from this extract.
330 for(; x < window_end_x; ++x)
332 int16_t val = *(in_ptr + x);
// Dispatches to the dequantization routine matching the input's data type.
// NOTE(review): the switch's case labels (and any default branch) are
// missing from this extract; the visible calls imply cases for QASYMM8,
// QASYMM8_SIGNED, QSYMM8_PER_CHANNEL, QSYMM8 and QSYMM16 — TODO confirm
// against the full source.
339 template <
typename T>
340 void run_dequantization_core(
const ITensor *input, ITensor *output,
const Window &window)
342 switch(input->info()->data_type())
// Asymmetric 8-bit: unsigned vs signed input element type.
345 run_dequantization_qasymm8<T, uint8_t>(
input, output, window);
348 run_dequantization_qasymm8<T, int8_t>(
input, output, window);
// Per-channel symmetric 8-bit: layout decides whether channels run along
// X (NHWC) or Z (NCHW).
351 input->info()->data_layout() ==
DataLayout::NHWC ? run_dequantization_qsymm8_per_channel_nhwc<T>(
input, output, window) : run_dequantization_qsymm8_per_channel_nchw<T>(input, output, window);
354 run_dequantization_qsymm8<T>(
input, output, window);
357 run_dequantization_qsymm16<T>(
input, output, window);
// Constructor initializer-list fragment (the surrounding definition is not
// visible in this extract): both tensor pointers start unset until
// configure() is called.
366 : _input(nullptr), _output(nullptr)
// configure() fragment (surrounding lines not visible): computes the kernel
// window from the tensor infos and registers it with the base kernel.
// The status half of win_config is presumably checked on a missing line
// (e.g. ARM_COMPUTE_ERROR_THROW_ON) — confirm against the full source.
379 auto win_config = validate_and_configure_window(input->
info(), output->
info());
383 INEKernel::configure(std::get<1>(win_config));
// run() fragment (surrounding switch/case lines not visible): dispatches the
// core loop on the output element type — float always; float16_t only on
// builds with FP16 vector arithmetic.
// NOTE(review): the extraction fused the #ifdef line with the following
// statement (original lines 404 and 406).
402 run_dequantization_core<float>(_input, _output,
window);
404 #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC 406 run_dequantization_core<float16_t>(_input, _output,
window);
__global uchar * offset(const Image *img, int x, int y)
Get the pointer position of an Image.
Window calculate_max_window(const ValidRegion &valid_region, const Steps &steps, bool skip_border, BorderSize border_size)
const Window & window() const
The maximum window the kernel can be executed on.
void configure(const ITensor *input, ITensor *output)
Set input, output tensors.
quantized, symmetric fixed-point 16-bit number
#define ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(tensor)
#define ARM_COMPUTE_ERROR(msg)
Print the given message then throw an std::runtime_error.
float32x4x2_t vdequantize(const uint8x8_t &qv, const UniformQuantizationInfo &qi)
Dequantize a neon vector holding 8 quantized values.
NEDequantizationLayerKernel()
Default constructor.
uint8x16_t vloadq(const uint8_t *ptr)
#define ARM_COMPUTE_RETURN_ON_ERROR(status)
Checks if a status contains an error and returns it.
virtual DataType data_type() const =0
Data type used for each element of the tensor.
float32x4x2_t vdequantize_int16(const int16x8_t &qv, float scale)
Dequantize a neon vector holding 8 16-bit quantized values.
1 channel, 1 F32 per channel
void run(const Window &window, const ThreadInfo &info) override
Execute the kernel on the passed window.
Store the tensor's metadata.
#define ARM_COMPUTE_ERROR_THROW_ON(status)
Interface for Neon tensor.
Copyright (c) 2017-2021 Arm Limited.
1 channel, 1 F16 per channel
#define ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(...)
static Status validate(const ITensorInfo *input, const ITensorInfo *output)
Static function to check if the given info will lead to a valid configuration of NEDequantizationLayerKernel.
static constexpr size_t DimX
Alias for dimension 0 also known as X dimension.
#define ARM_COMPUTE_UNUSED(...)
To avoid unused variables warnings.
float dequantize_qsymm16(int16_t value, const UniformQuantizationInfo &qinfo)
Dequantize a value given a 16-bit symmetric quantization scheme.
quantized, asymmetric fixed-point 8-bit number unsigned
UniformQuantizationInfo uniform() const
Return per layer quantization info.
bool auto_init_if_empty(ITensorInfo &info, const TensorShape &shape, int num_channels, DataType data_type, QuantizationInfo quantization_info=QuantizationInfo())
Auto initialize the tensor info (shape, number of channels and data type) if the current assignment is empty.
virtual std::unique_ptr< T > clone() const =0
Provide a clone of the current object of class T.
virtual ITensorInfo * info() const =0
Interface to be implemented by the child class to return the tensor's metadata.
#define ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(k)
quantized, symmetric fixed-point 8-bit number
quantized, symmetric per channel fixed-point 8-bit number
ScaleKernelInfo info(interpolation_policy, default_border_mode, PixelValue(), sampling_policy, false)
Information about executing thread and CPU.
#define ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(...)
static constexpr size_t DimZ
Alias for dimension 2 also known as Z dimension.
const QuantizationInfo qinfo
Num samples, height, width, channels.
#define ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(t, c,...)
Status validate_arguments(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output, const GEMMLowpOutputStageInfo *output_stage)
void vstore(uint8_t *ptr, uint8x8_t val)
static float dequantize(QUANTIZED_TYPE value, const UniformQuantizationInfo &qinfo)
Dequantize a value given a 8-bit asymmetric quantization scheme.
#define ARM_COMPUTE_ERROR_ON_NULLPTR(...)
void execute_window_loop(const Window &w, L &&lambda_function, Ts &&... iterators)
Iterate through the passed window, automatically adjusting the iterators and calling the lambda_function for each element.
float dequantize(uint8_t value, float scale, int32_t offset)
Dequantize a value given an 8-bit asymmetric quantization scheme.
quantized, asymmetric fixed-point 8-bit number signed
Includes all wrapper headers at once.
Describe a multidimensional execution window.
#define ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(f, s)