51 struct ScaleSelectorData
55 using ScaleSelectorPtr = std::add_pointer<bool(
const ScaleSelectorData &data)>
::type;
56 using ScaleKernelPtr = std::add_pointer<void(
const ITensor *, ITensor *,
const ITensor *,
const ITensor *,
const ITensor *,
65 static const ScaleKernel available_kernels[] =
67 #if defined(__ARM_FEATURE_SVE) 70 [](
const ScaleSelectorData & data) {
return data.dt ==
DataType::F16; },
75 [](
const ScaleSelectorData & data) {
return data.dt ==
DataType::F32; },
84 "qasymm8_signed_sve_scale",
90 [](
const ScaleSelectorData & data) {
return data.dt ==
DataType::U8; },
95 [](
const ScaleSelectorData & data) {
return data.dt ==
DataType::S16; },
99 #if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) 102 [](
const ScaleSelectorData & data) {
return data.dt ==
DataType::F16; },
108 [](
const ScaleSelectorData & data) {
return data.dt ==
DataType::F32; },
112 "qasymm8_neon_scale",
117 "qasymm8_signed_neon_scale",
123 [](
const ScaleSelectorData & data) {
return data.dt ==
DataType::U8; },
128 [](
const ScaleSelectorData & data) {
return data.dt ==
DataType::S16; },
140 const ScaleKernel *get_implementation(
const ScaleSelectorData &data)
142 for(
const auto &uk : available_kernels)
144 if(uk.is_selected(data))
153 const ITensorInfo *offsets, ITensorInfo *
dst,
const ScaleKernelInfo &
info)
155 const auto *uk = get_implementation(ScaleSelectorData{
src->data_type() });
168 const auto output_width =
dst->dimension(width_index);
169 const auto output_height =
dst->dimension(height_index);
181 if(dx !=
nullptr && dy !=
nullptr)
201 : _func(nullptr), _policy(), _border_mode(), _constant_border_value(
PixelValue()), _sampling_offset(0), _align_corners(false), _data_layout(
DataLayout::
UNKNOWN)
230 _sampling_offset = 0.5f;
246 #ifdef ENABLE_NCHW_KERNELS 250 std::string function_to_call(
"scale_");
255 static std::map<std::string, ScaleFunctionPtr> map_function =
257 {
"scale_U8_NCHW_AREA_CONSTANT", &CpuScaleKernel::scale_area_nchw_u8 },
259 {
"scale_U8_NCHW_BILINEAR", &CpuScaleKernel::scale_bilinear_nchw<uint8_t> },
260 {
"scale_U8_NCHW_NEAREST_NEIGHBOUR", &CpuScaleKernel::scale_nearest_nchw<uint8_t> },
262 {
"scale_QASYMM8_NCHW_BILINEAR", &CpuScaleKernel::scale_bilinear_qasymm<uint8_t> },
263 {
"scale_QASYMM8_NCHW_NEAREST_NEIGHBOUR", &CpuScaleKernel::scale_nearest_nchw<uint8_t> },
265 {
"scale_QASYMM8_SIGNED_NCHW_BILINEAR", &CpuScaleKernel::scale_bilinear_qasymm<int8_t> },
266 {
"scale_QASYMM8_SIGNED_NCHW_NEAREST_NEIGHBOUR", &CpuScaleKernel::scale_nearest_nchw<int8_t> },
268 {
"scale_S16_NCHW_BILINEAR", &CpuScaleKernel::scale_bilinear_nchw<int16_t> },
269 {
"scale_S16_NCHW_NEAREST_NEIGHBOUR", &CpuScaleKernel::scale_nearest_nchw<int16_t> },
271 #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC 272 {
"scale_F16_NCHW_BILINEAR", &CpuScaleKernel::scale_bilinear_nchw<float16_t> },
273 {
"scale_F16_NCHW_NEAREST_NEIGHBOUR", &CpuScaleKernel::scale_nearest_nchw<float16_t> },
276 {
"scale_F32_NCHW_BILINEAR", &CpuScaleKernel::scale_bilinear_nchw<float> },
277 {
"scale_F32_NCHW_NEAREST_NEIGHBOUR", &CpuScaleKernel::scale_nearest_nchw<float> },
279 auto it = map_function.find(function_to_call);
280 if(it != map_function.end())
285 #endif // ENABLE_NCHW_KERNELS 289 ICpuKernel::configure(win);
292 #ifdef ENABLE_NCHW_KERNELS 293 template <
typename T>
297 const size_t in_stride_x =
src->info()->dimension(0) +
src->info()->padding().left +
src->info()->padding().right;
320 Iterator offsets_i(offsets, win_off);
323 const auto offsets_ptr = reinterpret_cast<const int32_t *>(offsets_i.ptr());
325 id.y() + _sampling_offset)
327 const int32_t offset_row = in_yi * in_stride_x;
328 *reinterpret_cast<T *>(dst_i.ptr()) = *(reinterpret_cast<const T *>(src_i.ptr()) + offsets_ptr[0] + offset_row);
330 src_i, offsets_i, dst_i);
333 template <
typename T>
348 for(
size_t d =
Window::DimZ; d < offsets->info()->num_dimensions(); ++d)
350 win_off.set(d, Window::Dimension(0, 0, 0));
353 Iterator src_i(
src, win_in);
355 Iterator offsets_i(offsets, win_off);
356 Iterator dx_i(dx, win_off);
357 Iterator dy_i(dy, win_off);
359 const int32_t in_dim_w =
src->info()->dimension(0);
360 const int32_t in_dim_h =
src->info()->dimension(1);
361 const int32_t in_stride_w = in_dim_w +
src->info()->padding().left +
src->info()->padding().right;
365 #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC 366 using ConstType =
typename std::conditional<std::is_same<T, float16_t>::value,
half, T>
::type;
370 const T const_border_value = static_cast<T>(_constant_border_value.
get<ConstType>());
373 const int32_t index_h = std::floor((
id.y() + _sampling_offset) * hr - _sampling_offset);
374 const auto index_w = *(reinterpret_cast<const int32_t *>(offsets_i.ptr()));
375 const auto dx_val = *(reinterpret_cast<const float *>(dx_i.ptr()));
376 const auto dy_val = *(reinterpret_cast<const float *>(dy_i.ptr()));
377 const auto pixel_row_ptr = reinterpret_cast<const T *>(src_i.ptr());
379 const auto a00 = (0 <= index_w && index_w < in_dim_w && 0 <= index_h && index_h < in_dim_h) ? (*(pixel_row_ptr + index_w + index_h * in_stride_w)) : const_border_value;
380 const auto a01 = (-1 <= index_w && index_w < in_dim_w - 1 && 0 <= index_h && index_h < in_dim_h) ? (*(pixel_row_ptr + index_w + 1 + index_h * in_stride_w)) : const_border_value;
381 const auto a10 = (0 <= index_w && index_w < in_dim_w && -1 <= index_h
382 && index_h < in_dim_h - 1) ?
383 (*(pixel_row_ptr + index_w + index_h * in_stride_w + in_stride_w)) :
385 const auto a11 = (-1 <= index_w && index_w < in_dim_w - 1 && -1 <= index_h
386 && index_h < in_dim_h - 1) ?
387 (*(pixel_row_ptr + index_w + 1 + index_h * in_stride_w + in_stride_w)) :
392 src_i, offsets_i, dx_i, dy_i, dst_i);
398 const int index_h = std::floor((
id.y() + _sampling_offset) * hr - _sampling_offset);
399 const auto index_w = *(reinterpret_cast<const int32_t *>(offsets_i.ptr()));
400 const auto dx_val = *(reinterpret_cast<const float *>(dx_i.ptr()));
401 const auto dy_val = *(reinterpret_cast<const float *>(dy_i.ptr()));
402 const auto pixel_row_ptr = reinterpret_cast<const T *>(src_i.ptr());
404 auto clamped_x = utility::clamp<int>(index_w, 0, in_dim_w - 1);
405 auto clamped_x1 = utility::clamp<int>(index_w + 1, 0, in_dim_w - 1);
406 auto clamped_y = utility::clamp<int>(index_h, 0, in_dim_h - 1);
407 auto clamped_y1 = utility::clamp<int>(index_h + 1, 0, in_dim_h - 1);
409 const auto a00 = *(pixel_row_ptr + clamped_x + clamped_y * in_stride_w);
410 const auto a01 = *(pixel_row_ptr + clamped_x1 + clamped_y * in_stride_w);
411 const auto a10 = *(pixel_row_ptr + clamped_x + clamped_y1 * in_stride_w);
412 const auto a11 = *(pixel_row_ptr + clamped_x1 + clamped_y1 * in_stride_w);
416 src_i, offsets_i, dx_i, dy_i, dst_i);
424 void CpuScaleKernel::scale_area_nchw_u8(
const ITensor *
src, ITensor *
dst,
const ITensor *dx,
const ITensor *dy,
const ITensor *offsets,
const Window &window)
427 using namespace scale_helpers;
438 Iterator src_i(
src, win_in);
443 const auto w =
src->info()->dimension(0);
444 const auto h =
src->info()->dimension(1);
445 const size_t in_stride =
src->info()->strides_in_bytes()[1];
449 const auto in_ptr = reinterpret_cast<const uint8_t *>(src_i.ptr());
451 uint8x8_t tmp0 = vdup_n_u8(0);
461 uint8x8_t tmp1 = vdup_n_u8(0);
464 tmp1 = vset_lane_u8(
pixel_area_c1u8_clamp(in_ptr, in_stride,
w, h, wr, hr,
id.x() + 10,
id.y()), tmp1, 2);
465 tmp1 = vset_lane_u8(
pixel_area_c1u8_clamp(in_ptr, in_stride,
w, h, wr, hr,
id.x() + 11,
id.y()), tmp1, 3);
466 tmp1 = vset_lane_u8(
pixel_area_c1u8_clamp(in_ptr, in_stride,
w, h, wr, hr,
id.x() + 12,
id.y()), tmp1, 4);
467 tmp1 = vset_lane_u8(
pixel_area_c1u8_clamp(in_ptr, in_stride,
w, h, wr, hr,
id.x() + 13,
id.y()), tmp1, 5);
468 tmp1 = vset_lane_u8(
pixel_area_c1u8_clamp(in_ptr, in_stride,
w, h, wr, hr,
id.x() + 14,
id.y()), tmp1, 6);
469 tmp1 = vset_lane_u8(
pixel_area_c1u8_clamp(in_ptr, in_stride,
w, h, wr, hr,
id.x() + 15,
id.y()), tmp1, 7);
471 vst1q_u8(dst_i.ptr(), vcombine_u8(tmp0, tmp1));
476 template <
typename T>
477 void CpuScaleKernel::scale_bilinear_qasymm(
const ITensor *
src, ITensor *
dst,
const ITensor *dx,
const ITensor *dy,
const ITensor *offsets,
const Window &
window)
492 win_in.set(
idx_width, Window::Dimension(0, 0, 0));
493 win_in.set(
idx_height, Window::Dimension(0, 0, 0));
495 for(
size_t d =
Window::DimZ; d < offsets->info()->num_dimensions(); ++d)
497 win_off.set(d, Window::Dimension(0, 0, 0));
500 Iterator src_i(
src, win_in);
503 const int32_t in_dim_w =
src->info()->dimension(
idx_width);
505 const int32_t stride_w =
src->info()->strides_in_bytes()[
idx_width];
506 const int32_t stride_h =
src->info()->strides_in_bytes()[
idx_height];
508 const UniformQuantizationInfo iq_info =
src->info()->quantization_info().uniform();
509 const UniformQuantizationInfo oq_info =
dst->info()->quantization_info().uniform();
513 #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC 514 using ConstType =
typename std::conditional<std::is_same<T, float16_t>::value,
half, T>
::type;
518 const T const_border_value = static_cast<T>(_constant_border_value.
get<ConstType>());
521 const int32_t index_h = std::floor((
id[
idx_height] + _sampling_offset) * hr - _sampling_offset);
522 const int32_t index_w = *(reinterpret_cast<const int32_t *>(offsets->ptr_to_element(Coordinates(
id[
idx_width],
id[
idx_height]))));
523 const auto dx_val = *(reinterpret_cast<const float *>(dx->ptr_to_element(Coordinates(
id[
idx_width],
id[
idx_height]))));
524 const auto dy_val = *(reinterpret_cast<const float *>(dy->ptr_to_element(Coordinates(
id[
idx_width],
id[
idx_height]))));
525 const auto pixel_row_ptr = reinterpret_cast<const T *>(src_i.ptr());
527 const auto a00 = (0 <= index_w && index_w < in_dim_w && 0 <= index_h && index_h < in_dim_h) ?
528 (*(pixel_row_ptr + index_w * stride_w + index_h * stride_h)) :
530 const auto a01 = (-1 <= index_w && index_w < in_dim_w - 1 && 0 <= index_h && index_h < in_dim_h) ?
531 (*(pixel_row_ptr + (index_w + 1) * stride_w + index_h * stride_h)) :
533 const auto a10 = (0 <= index_w && index_w < in_dim_w && -1 <= index_h && index_h < in_dim_h - 1) ?
534 (*(pixel_row_ptr + index_w * stride_w + (index_h + 1) * stride_h)) :
536 const auto a11 = (-1 <= index_w && index_w < in_dim_w - 1 && -1 <= index_h && index_h < in_dim_h - 1) ?
537 (*(pixel_row_ptr + (index_w + 1) * stride_w + (index_h + 1) * stride_h)) :
552 const int index_h = std::floor((
id[
idx_height] + _sampling_offset) * hr - _sampling_offset);
553 const int32_t index_w = *(reinterpret_cast<const int32_t *>(offsets->ptr_to_element(Coordinates(
id[
idx_width],
id[
idx_height]))));
554 const auto dx_val = *(reinterpret_cast<const float *>(dx->ptr_to_element(Coordinates(
id[
idx_width],
id[
idx_height]))));
555 const auto dy_val = *(reinterpret_cast<const float *>(dy->ptr_to_element(Coordinates(
id[
idx_width],
id[
idx_height]))));
556 const auto pixel_row_ptr = reinterpret_cast<const T *>(src_i.ptr());
558 auto clamped_w = utility::clamp<int>(index_w, 0, in_dim_w - 1);
559 auto clamped_w1 = utility::clamp<int>(index_w + 1, 0, in_dim_w - 1);
560 auto clamped_h = utility::clamp<int>(index_h, 0, in_dim_h - 1);
561 auto clamped_h1 = utility::clamp<int>(index_h + 1, 0, in_dim_h - 1);
563 const auto a00 = *(pixel_row_ptr + clamped_w * stride_w + clamped_h * stride_h);
564 const auto a01 = *(pixel_row_ptr + clamped_w1 * stride_w + clamped_h * stride_h);
565 const auto a10 = *(pixel_row_ptr + clamped_w * stride_w + clamped_h1 * stride_h);
566 const auto a11 = *(pixel_row_ptr + clamped_w1 * stride_w + clamped_h1 * stride_h);
581 #endif // ENABLE_NCHW_KERNELS 609 const auto *uk = get_implementation(ScaleSelectorData{
src->info()->data_type() });
610 uk->ukernel(
src,
dst, offsets, dx, dy, _policy, _border_mode, _constant_border_value, _sampling_offset, _align_corners,
window);
616 return "CpuScaleKernel";
BorderMode
Methods available to handle borders.
void s16_sve_scale(const ITensor *src, ITensor *dst, const ITensor *offsets, const ITensor *dx, const ITensor *dy, InterpolationPolicy policy, BorderMode border_mode, PixelValue constant_border_value, float sampling_offset, bool align_corners, const Window &window)
BorderMode border_mode
Border mode policy.
virtual size_t num_dimensions() const =0
The number of dimensions of the tensor (rank)
Class describing the value of a pixel for any image format.
Window calculate_max_window(const ValidRegion &valid_region, const Steps &steps, bool skip_border, BorderSize border_size)
InterpolationPolicy
Interpolation method.
const Window & window() const
The maximum window the kernel can be executed on.
void qasymm8_sve_scale(const ITensor *src, ITensor *dst, const ITensor *offsets, const ITensor *dx, const ITensor *dy, InterpolationPolicy policy, BorderMode border_mode, PixelValue constant_border_value, float sampling_offset, bool align_corners, const Window &window)
__kernel void scale_bilinear_nchw(__global uchar *in_ptr, uint in_stride_x, uint in_step_x, uint in_stride_y, uint in_step_y, uint in_offset_first_element_in_bytes, __global uchar *out_ptr, uint out_stride_x, uint out_step_x, uint out_stride_y, uint out_step_y, uint out_offset_first_element_in_bytes, const float input_width, const float input_height, const float scale_x, const float scale_y)
Performs an affine transformation on an image interpolating with the BILINEAR method.
InterpolationPolicy interpolation_policy
Interpolation type to use.
#define REGISTER_FP16_NEON(func_name)
void run_op(ITensorPack &tensors, const Window &window, const ThreadInfo &info) override
Execute the kernel on the passed window.
#define ARM_COMPUTE_ERROR(msg)
Print the given message then throw an std::runtime_error.
1 channel, 1 U8 per channel
#define REGISTER_FP32_NEON(func_name)
void get(uint8_t &v) const
Interpret the pixel value as a U8.
void qasymm8_signed_sve_scale(const ITensor *src, ITensor *dst, const ITensor *offsets, const ITensor *dx, const ITensor *dy, InterpolationPolicy policy, BorderMode border_mode, PixelValue constant_border_value, float sampling_offset, bool align_corners, const Window &window)
#define ARM_COMPUTE_RETURN_ON_ERROR(status)
Checks if a status contains an error and returns it.
half_float::half half
16-bit floating point type
1 channel, 1 F32 per channel
Output values are defined by bilinear interpolation between the pixels.
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
#define REGISTER_QASYMM8_SIGNED_NEON(func_name)
const DataLayout data_layout
bool align_corners
Align corners of input and output.
Store the tensor's metadata.
#define ARM_COMPUTE_ERROR_THROW_ON(status)
void fp16_sve_scale(const ITensor *src, ITensor *dst, const ITensor *offsets, const ITensor *dx, const ITensor *dy, InterpolationPolicy policy, BorderMode border_mode, PixelValue constant_border_value, float sampling_offset, bool align_corners, const Window &window)
Describe one of the image's dimensions with a start, end and step.
bool use_padding
Indication of using padding.
T round_half_away_from_zero(T value)
Round floating-point value with half value rounding away from zero.
Output values are defined to match the source pixel whose center is nearest to the sample position.
#define ARM_COMPUTE_RETURN_ERROR_ON(cond)
If the condition is true, an error is returned.
decltype(strategy::transforms) typedef type
Interface for CPU tensor.
SimpleTensor< float > src
Copyright (c) 2017-2021 Arm Limited.
1 channel, 1 F16 per channel
Samples are taken at pixel center.
bool is_align_corners_allowed_sampling_policy(SamplingPolicy sampling_policy)
Returns if aligned corners are allowed for the given sampling policy.
#define REGISTER_INTEGER_NEON(func_name)
#define ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(...)
1 channel, 1 S32 per channel
const ITensor * get_const_tensor(int id) const
Get constant tensor of a given id.
const std::string & string_from_data_type(DataType dt)
Convert a data type identity into a string.
void u8_sve_scale(const ITensor *src, ITensor *dst, const ITensor *offsets, const ITensor *dx, const ITensor *dy, InterpolationPolicy policy, BorderMode border_mode, PixelValue constant_border_value, float sampling_offset, bool align_corners, const Window &window)
static constexpr size_t DimX
Alias for dimension 0 also known as X dimension.
#define ARM_COMPUTE_UNUSED(...)
To avoid unused variables warnings.
#define REGISTER_QASYMM8_NEON(func_name)
SamplingPolicy sampling_policy
Sampling policy used by the interpolation.
quantized, asymmetric fixed-point 8-bit number unsigned
Class to describe a number of elements in each dimension.
uint8_t pixel_area_c1u8_clamp(const uint8_t *first_pixel_ptr, size_t stride, size_t width, size_t height, float wr, float hr, int x, int y)
Return the pixel at (x,y) using area interpolation by clamping when out of borders.
void qasymm8_signed_neon_scale(const ITensor *src, ITensor *dst, const ITensor *offsets, const ITensor *dx, const ITensor *dy, InterpolationPolicy policy, BorderMode border_mode, PixelValue constant_border_value, float sampling_offset, bool align_corners, const Window &window)
const std::string & string_from_interpolation_policy(InterpolationPolicy policy)
Translates a given interpolation policy to a string.
void fp32_sve_scale(const ITensor *src, ITensor *dst, const ITensor *offsets, const ITensor *dx, const ITensor *dy, InterpolationPolicy policy, BorderMode border_mode, PixelValue constant_border_value, float sampling_offset, bool align_corners, const Window &window)
virtual ITensorInfo * info() const =0
Interface to be implemented by the child class to return the tensor's metadata.
Samples are taken at pixel top left corner.
void set(size_t dimension, const Dimension &dim)
Set the values of a given dimension.
#define ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(k)
1 channel, 1 S16 per channel
Output values are determined by averaging the source pixels whose areas fall under the area of the destination pixel, projected onto the source image.
CpuScaleKernel()
Default constructor.
Num samples, channels, height, width.
#define ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(t, c,...)
static constexpr size_t DimY
Alias for dimension 1 also known as Y dimension.
ScaleKernelInfo info(interpolation_policy, default_border_mode, PixelValue(), sampling_policy, false)
ITensor * get_tensor(int id)
Get tensor of a given id from the pack.
const std::string & string_from_data_layout(DataLayout dl)
Convert a data layout identity into a string.
Information about executing thread and CPU.
const ScaleSelectorPtr is_selected
static constexpr size_t DimZ
Alias for dimension 2 also known as Z dimension.
Borders are left undefined.
Pixels outside the image are assumed to have the same value as the closest image pixel.
#define ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(...)
constexpr const Dimension & y() const
Alias to access the second dimension of the window.
#define ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(t, c,...)
void configure(const ITensorInfo *src, const ITensorInfo *dx, const ITensorInfo *dy, const ITensorInfo *offsets, ITensorInfo *dst, const ScaleKernelInfo &info)
Initialise the kernel's inputs, output and interpolation policy.
Status validate_arguments(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output, const GEMMLowpOutputStageInfo *output_stage)
static Status validate(const ITensorInfo *src, const ITensorInfo *dx, const ITensorInfo *dy, const ITensorInfo *offsets, ITensorInfo *dst, const ScaleKernelInfo &info)
Static function to check if given info will lead to a valid configuration of CpuScaleKernel.
#define ARM_COMPUTE_RETURN_ERROR_ON_MSG(cond, msg)
If the condition is true, an error is returned.
static float dequantize(QUANTIZED_TYPE value, const UniformQuantizationInfo &qinfo)
Dequantize a value given a 8-bit asymmetric quantization scheme.
#define ARM_COMPUTE_ERROR_ON_NULLPTR(...)
PixelValue constant_border_value
Constant value to use for constant border mode policy.
void execute_window_loop(const Window &w, L &&lambda_function, Ts &&... iterators)
Iterate through the passed window, automatically adjusting the iterators and calling the lambda_function for each element.
float calculate_resize_ratio(size_t input_size, size_t output_size, bool align_corners=false)
Returns resize ratio between input and output with consideration of aligned corners.
quantized, asymmetric fixed-point 8-bit number signed
Includes all wrapper headers at once.
size_t get_data_layout_dimension_index(const DataLayout data_layout, const DataLayoutDimension data_layout_dimension)
Get the index of the given dimension.
DataLayout data_layout
Data layout to use.
Iterator updated by execute_window_loop for each window element.
float delta_bilinear(float a00, float a01, float a10, float a11, float dx_val, float dy_val)
Computes bilinear interpolation using the top-left, top-right, bottom-left, bottom-right pixels and the horizontal and vertical distances (dx, dy) of the sample point from the top-left pixel.
DataType
Available data types.
static QUANTIZED_TYPE quantize(float value, const UniformQuantizationInfo &qinfo, RoundingPolicy rounding_policy=RoundingPolicy::TO_NEAREST_UP)
Quantize a value given a 8-bit asymmetric quantization scheme.
DataLayout
[DataLayout enum definition]
const char * name() const override
Name of the kernel.
void qasymm8_neon_scale(const ITensor *src, ITensor *dst, const ITensor *offsets, const ITensor *dx, const ITensor *dy, InterpolationPolicy policy, BorderMode border_mode, PixelValue constant_border_value, float sampling_offset, bool align_corners, const Window &window)
Describe a multidimensional execution window.
#define ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(f, s)
constexpr const Dimension & x() const
Alias to access the first dimension of the window.