Compute Library 23.08
CpuDequantizeKernel.cpp
/*
 * Copyright (c) 2017-2021 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "src/cpu/kernels/CpuDequantizeKernel.h"

#include "arm_compute/core/Error.h"
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/Utils.h"
#include "arm_compute/core/Validate.h"
#include "arm_compute/core/Window.h"
#include "src/core/CPP/Validate.h"
#include "src/core/NEON/NEAsymm.h"
#include "src/core/NEON/NESymm.h"
#include "src/core/NEON/wrapper/wrapper.h"
#include "src/core/helpers/AutoConfiguration.h"
#include "src/core/helpers/WindowHelpers.h"

#include <arm_neon.h>

namespace arm_compute
{
namespace cpu
{
namespace kernels
{
namespace
{
Status validate_arguments(const ITensorInfo *src, const ITensorInfo *dst)
{
    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src, dst);
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::QSYMM8_PER_CHANNEL, DataType::QSYMM8, DataType::QSYMM16);

    if(dst->tensor_shape().total_size() > 0)
    {
        ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(dst);
        ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(dst, 1, DataType::F16, DataType::F32);
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(dst, src);
    }

    return Status{};
}

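// store_result() writes a block of dequantized values to the output buffer. The
// generic templates are no-op placeholders; the float and (where FP16 vector
// arithmetic is available) float16_t specialisations perform the actual stores.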
template <typename T>
inline void store_result(T *ptr, const float32x4x4_t &v)
{
    ARM_COMPUTE_UNUSED(ptr, v);
}

template <>
inline void store_result<float>(float *ptr, const float32x4x4_t &v)
{
    wrapper::vstore(ptr, v.val[0]);
    wrapper::vstore(ptr + 4, v.val[1]);
    wrapper::vstore(ptr + 8, v.val[2]);
    wrapper::vstore(ptr + 12, v.val[3]);
}

#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
template <>
inline void store_result<float16_t>(float16_t *ptr, const float32x4x4_t &v)
{
    wrapper::vstore(ptr, vcombine_f16(vcvt_f16_f32(v.val[0]), vcvt_f16_f32(v.val[1])));
    wrapper::vstore(ptr + 8, vcombine_f16(vcvt_f16_f32(v.val[2]), vcvt_f16_f32(v.val[3])));
}
#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */

template <typename T>
inline void store_result(T *ptr, const float32x4x2_t &v)
{
    ARM_COMPUTE_UNUSED(ptr, v);
}

template <>
inline void store_result<float>(float *ptr, const float32x4x2_t &v)
{
    wrapper::vstore(ptr, v.val[0]);
    wrapper::vstore(ptr + 4, v.val[1]);
}

#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
template <>
inline void store_result<float16_t>(float16_t *ptr, const float32x4x2_t &v)
{
    wrapper::vstore(ptr, vcombine_f16(vcvt_f16_f32(v.val[0]), vcvt_f16_f32(v.val[1])));
}
#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */

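// QASYMM8 / QASYMM8_SIGNED path: out = scale * (q - offset), 16 elements per
// vector iteration with a scalar loop for the left-over tail.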
template <typename TOut, typename TIn>
void run_dequantization_qasymm8(const ITensor *input, ITensor *output, const Window &window)
{
    const UniformQuantizationInfo &qinfo  = input->info()->quantization_info().uniform();
    const float                    scale  = qinfo.scale;
    const int32_t                  offset = qinfo.offset;

    const int  window_step_x  = 16;
    const auto window_start_x = static_cast<int>(window.x().start());
    const auto window_end_x   = static_cast<int>(window.x().end());

    // Collapse window and reset first dimension to handle tail calculations manually
    Window win_collapsed = window.collapse_if_possible(window, Window::DimZ);
    win_collapsed.set(Window::DimX, Window::Dimension(0, 1, 1));

    // Create iterators
    Iterator in(input, win_collapsed);
    Iterator out(output, win_collapsed);

    execute_window_loop(win_collapsed, [&](const Coordinates &)
    {
        const auto in_ptr  = reinterpret_cast<const TIn *>(in.ptr());
        const auto out_ptr = reinterpret_cast<TOut *>(out.ptr());

        int x = window_start_x;
        for(; x <= (window_end_x - window_step_x); x += window_step_x)
        {
            const auto vin  = wrapper::vloadq(in_ptr + x);
            const auto vdeq = vdequantize(vin, scale, offset);

            store_result(reinterpret_cast<TOut *>(out_ptr + x), vdeq);
        }

        // Compute left-over elements
        for(; x < window_end_x; ++x)
        {
            auto val       = *(in_ptr + x);
            *(out_ptr + x) = static_cast<TOut>(Qasymm8QuantizationHelper<TIn>::dequantize(val, qinfo));
        }
    },
    in, out);
}

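// QSYMM8_PER_CHANNEL, NCHW layout: the channel is the Z coordinate of the
// window, so a single per-channel scale (scale[id.z()]) covers each plane.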
template <typename T>
void run_dequantization_qsymm8_per_channel_nchw(const ITensor *input, ITensor *output, const Window &window)
{
    const auto scale = input->info()->quantization_info().scale();

    const int  window_step_x  = 16;
    const auto window_start_x = static_cast<int>(window.x().start());
    const auto window_end_x   = static_cast<int>(window.x().end());

    // Reset first dimension to handle tail calculations manually
    Window win(window);
    win.set(Window::DimX, Window::Dimension(0, 1, 1));

    // Create iterators
    Iterator in(input, win);
    Iterator out(output, win);

    execute_window_loop(win, [&](const Coordinates &id)
    {
        const auto in_ptr  = reinterpret_cast<const int8_t *>(in.ptr());
        const auto out_ptr = reinterpret_cast<T *>(out.ptr());

        int x = window_start_x;
        for(; x <= (window_end_x - window_step_x); x += window_step_x)
        {
            const auto vin  = wrapper::vloadq(in_ptr + x);
            const auto vdeq = vdequantize(vin, scale[id.z()]);

            store_result<T>(reinterpret_cast<T *>(out_ptr + x), vdeq);
        }

        // Compute left-over elements
        for(; x < window_end_x; ++x)
        {
            int8_t val     = *(in_ptr + x);
            *(out_ptr + x) = static_cast<T>(dequantize(val, scale[id.z()]));
        }
    },
    in, out);
}

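// QSYMM8_PER_CHANNEL, NHWC layout: channels run along X, so a vector of 16
// per-element scales is assembled for every SIMD iteration.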
template <typename T>
void run_dequantization_qsymm8_per_channel_nhwc(const ITensor *input, ITensor *output, const Window &window)
{
    const auto scale = input->info()->quantization_info().scale();

    const int  window_step_x  = 16;
    const auto window_start_x = static_cast<int>(window.x().start());
    const auto window_end_x   = static_cast<int>(window.x().end());

    // Reset first dimension to handle tail calculations manually
    Window win(window);
    win.set(Window::DimX, Window::Dimension(0, 1, 1));

    // Create iterators
    Iterator in(input, win);
    Iterator out(output, win);

    execute_window_loop(win, [&](const Coordinates &)
    {
        const auto in_ptr  = reinterpret_cast<const int8_t *>(in.ptr());
        const auto out_ptr = reinterpret_cast<T *>(out.ptr());

        int x = window_start_x;
        for(; x <= (window_end_x - window_step_x); x += window_step_x)
        {
            const float32x4x4_t vscale =
            {
                {
                    scale[x + 0],  scale[x + 1],  scale[x + 2],  scale[x + 3],
                    scale[x + 4],  scale[x + 5],  scale[x + 6],  scale[x + 7],
                    scale[x + 8],  scale[x + 9],  scale[x + 10], scale[x + 11],
                    scale[x + 12], scale[x + 13], scale[x + 14], scale[x + 15]
                }
            };
            const auto vin  = wrapper::vloadq(in_ptr + x);
            const auto vdeq = vdequantize(vin, vscale);

            store_result<T>(reinterpret_cast<T *>(out_ptr + x), vdeq);
        }

        // Compute left-over elements
        for(; x < window_end_x; ++x)
        {
            int8_t val     = *(in_ptr + x);
            *(out_ptr + x) = static_cast<T>(dequantize(val, scale[x]));
        }
    },
    in, out);
}

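// QSYMM8 path: one tensor-wide scale, no offset.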
template <typename T>
void run_dequantization_qsymm8(const ITensor *input, ITensor *output, const Window &window)
{
    const UniformQuantizationInfo &qinfo = input->info()->quantization_info().uniform();
    const float                    scale = qinfo.scale;

    const int  window_step_x  = 16;
    const auto window_start_x = static_cast<int>(window.x().start());
    const auto window_end_x   = static_cast<int>(window.x().end());

    // Collapse window and reset first dimension to handle tail calculations manually
    Window win_collapsed = window.collapse_if_possible(window, Window::DimZ);
    win_collapsed.set(Window::DimX, Window::Dimension(0, 1, 1));

    // Create iterators
    Iterator in(input, win_collapsed);
    Iterator out(output, win_collapsed);

    execute_window_loop(win_collapsed, [&](const Coordinates &)
    {
        const auto in_ptr  = reinterpret_cast<const int8_t *>(in.ptr());
        const auto out_ptr = reinterpret_cast<T *>(out.ptr());

        int x = window_start_x;
        for(; x <= (window_end_x - window_step_x); x += window_step_x)
        {
            const auto vin  = wrapper::vloadq(in_ptr + x);
            const auto vdeq = vdequantize(vin, scale);

            store_result<T>(reinterpret_cast<T *>(out_ptr + x), vdeq);
        }

        // Compute left-over elements
        for(; x < window_end_x; ++x)
        {
            int8_t val     = *(in_ptr + x);
            *(out_ptr + x) = static_cast<T>(dequantize(val, scale));
        }
    },
    in, out);
}

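// QSYMM16 path: 16-bit symmetric inputs, 8 elements per vector iteration.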
template <typename T>
void run_dequantization_qsymm16(const ITensor *input, ITensor *output, const Window &window)
{
    const UniformQuantizationInfo &qinfo = input->info()->quantization_info().uniform();
    const float                    scale = qinfo.scale;

    const int  window_step_x  = 8;
    const auto window_start_x = static_cast<int>(window.x().start());
    const auto window_end_x   = static_cast<int>(window.x().end());

    // Collapse window and reset first dimension to handle tail calculations manually
    Window win_collapsed = window.collapse_if_possible(window, Window::DimZ);
    win_collapsed.set(Window::DimX, Window::Dimension(0, 1, 1));

    // Create iterators
    Iterator in(input, win_collapsed);
    Iterator out(output, win_collapsed);

    execute_window_loop(win_collapsed, [&](const Coordinates &)
    {
        const auto in_ptr  = reinterpret_cast<const int16_t *>(in.ptr());
        const auto out_ptr = reinterpret_cast<T *>(out.ptr());

        int x = window_start_x;
        for(; x <= (window_end_x - window_step_x); x += window_step_x)
        {
            const auto vin  = wrapper::vloadq(in_ptr + x);
            const auto vdeq = vdequantize_int16(vin, scale);

            store_result<T>(reinterpret_cast<T *>(out_ptr + x), vdeq);
        }

        // Compute left-over elements
        for(; x < window_end_x; ++x)
        {
            int16_t val    = *(in_ptr + x);
            *(out_ptr + x) = static_cast<T>(dequantize_qsymm16(val, scale));
        }
    },
    in, out);
}

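// Dispatch on the input data type; T is the output element type (float or float16_t).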
template <typename T>
void run_dequantization_core(const ITensor *input, ITensor *output, const Window &window)
{
    switch(input->info()->data_type())
    {
        case DataType::QASYMM8:
            run_dequantization_qasymm8<T, uint8_t>(input, output, window);
            break;
        case DataType::QASYMM8_SIGNED:
            run_dequantization_qasymm8<T, int8_t>(input, output, window);
            break;
        case DataType::QSYMM8_PER_CHANNEL:
            input->info()->data_layout() == DataLayout::NHWC ? run_dequantization_qsymm8_per_channel_nhwc<T>(input, output, window) : run_dequantization_qsymm8_per_channel_nchw<T>(input, output, window);
            break;
        case DataType::QSYMM8:
            run_dequantization_qsymm8<T>(input, output, window);
            break;
        case DataType::QSYMM16:
            run_dequantization_qsymm16<T>(input, output, window);
            break;
        default:
            ARM_COMPUTE_ERROR("Unsupported data type.");
    }
}
} // namespace

void CpuDequantizeKernel::configure(const ITensorInfo *src, ITensorInfo *dst)
{
    ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(src, dst));

    // Configure kernel window
    Window win = calculate_max_window(*src);

    // Output tensor auto initialization if not yet initialized
    auto_init_if_empty(*dst, src->tensor_shape(), 1, DataType::F32);

    ICpuKernel::configure(win);
}

Status CpuDequantizeKernel::validate(const ITensorInfo *src, const ITensorInfo *dst)
{
    ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(src, dst));
    return Status{};
}

void CpuDequantizeKernel::run_op(ITensorPack &tensors, const Window &window, const ThreadInfo &info)
{
    ARM_COMPUTE_UNUSED(info);
    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(IKernel::window(), window);

    const auto src = tensors.get_const_tensor(TensorType::ACL_SRC);
    auto       dst = tensors.get_tensor(TensorType::ACL_DST);

    switch(dst->info()->data_type())
    {
        case DataType::F32:
            run_dequantization_core<float>(src, dst, window);
            break;
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
        case DataType::F16:
            run_dequantization_core<float16_t>(src, dst, window);
            break;
#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
        default:
            ARM_COMPUTE_ERROR("Unsupported data type.");
    }
}
const char *CpuDequantizeKernel::name() const
{
    return "CpuDequantizeKernel";
}
} // namespace kernels
} // namespace cpu
} // namespace arm_compute
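Below is a minimal, hypothetical usage sketch, not part of this file: it assumes the public runtime Tensor/TensorInfo classes, an illustrative shape and quantization (16x4, scale 0.5, offset 10), and drives the kernel single-threaded over its own maximum window.

#include "arm_compute/runtime/Tensor.h"
#include "src/cpu/kernels/CpuDequantizeKernel.h"

using namespace arm_compute;

int main()
{
    // QASYMM8 input and F32 output of the same shape (values are illustrative).
    Tensor src, dst;
    src.allocator()->init(TensorInfo(TensorShape(16U, 4U), 1, DataType::QASYMM8,
                                     QuantizationInfo(0.5f, 10)));
    dst.allocator()->init(TensorInfo(TensorShape(16U, 4U), 1, DataType::F32));

    // Validate the metadata, then configure the kernel window.
    cpu::kernels::CpuDequantizeKernel kernel;
    ARM_COMPUTE_ERROR_THROW_ON(
        cpu::kernels::CpuDequantizeKernel::validate(src.info(), dst.info()));
    kernel.configure(src.info(), dst.info());

    src.allocator()->allocate();
    dst.allocator()->allocate();
    // ... fill src with quantized data ...

    // Pack operands and run over the kernel's full window on the calling thread.
    ITensorPack pack;
    pack.add_const_tensor(TensorType::ACL_SRC, &src);
    pack.add_tensor(TensorType::ACL_DST, &dst);
    kernel.run_op(pack, kernel.window(), ThreadInfo{});
    return 0;
}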