47 struct ScaleSelectorData
52 using ScaleKernelPtr = std::add_pointer<void(
const ITensor *, ITensor *,
const ITensor *,
const ITensor *,
const ITensor *,
61 static const ScaleKernel available_kernels[] =
63 #if defined(__ARM_FEATURE_SVE) 66 [](
const ScaleSelectorData & data) {
return data.dt ==
DataType::F16; },
71 [](
const ScaleSelectorData & data) {
return data.dt ==
DataType::F32; },
80 "qasymm8_signed_sve_scale",
86 [](
const ScaleSelectorData & data) {
return data.dt ==
DataType::U8; },
91 [](
const ScaleSelectorData & data) {
return data.dt ==
DataType::S16; },
95 #if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) 98 [](
const ScaleSelectorData & data) {
return data.dt ==
DataType::F16; },
104 [](
const ScaleSelectorData & data) {
return data.dt ==
DataType::F32; },
108 "qasymm8_neon_scale",
113 "qasymm8_signed_neon_scale",
119 [](
const ScaleSelectorData & data) {
return data.dt ==
DataType::U8; },
124 [](
const ScaleSelectorData & data) {
return data.dt ==
DataType::S16; },
136 const ScaleKernel *get_implementation(
const ScaleSelectorData &data)
138 for(
const auto &uk : available_kernels)
140 if(uk.is_selected(data))
149 const ITensorInfo *offsets, ITensorInfo *output,
const ScaleKernelInfo &
info)
151 const auto *uk = get_implementation(ScaleSelectorData{ input->data_type() });
164 const auto output_width = output->dimension(width_index);
165 const auto output_height = output->dimension(height_index);
194 : _func(nullptr), _offsets(nullptr), _dx(nullptr), _dy(nullptr), _input(nullptr), _output(nullptr), _policy(), _border_mode(), _constant_border_value(
PixelValue()), _sampling_offset(0),
205 dx !=
nullptr ? dx->
info() :
nullptr,
206 dy !=
nullptr ? dy->
info() :
nullptr,
207 offsets !=
nullptr ? offsets->
info() :
nullptr,
228 _sampling_offset = 0.5f;
247 std::string function_to_call(
"scale_");
252 static std::map<std::string, ScaleFunctionPtr> map_function =
254 {
"scale_U8_NCHW_AREA_CONSTANT", &NEScaleKernel::scale_area_nchw_u8 },
256 {
"scale_U8_NCHW_BILINEAR", &NEScaleKernel::scale_bilinear_nchw<uint8_t> },
257 {
"scale_U8_NCHW_NEAREST_NEIGHBOUR", &NEScaleKernel::scale_nearest_nchw<uint8_t> },
259 {
"scale_QASYMM8_NCHW_BILINEAR", &NEScaleKernel::scale_bilinear_qasymm<uint8_t> },
260 {
"scale_QASYMM8_NCHW_NEAREST_NEIGHBOUR", &NEScaleKernel::scale_nearest_nchw<uint8_t> },
262 {
"scale_QASYMM8_SIGNED_NCHW_BILINEAR", &NEScaleKernel::scale_bilinear_qasymm<int8_t> },
263 {
"scale_QASYMM8_SIGNED_NCHW_NEAREST_NEIGHBOUR", &NEScaleKernel::scale_nearest_nchw<int8_t> },
265 {
"scale_S16_NCHW_BILINEAR", &NEScaleKernel::scale_bilinear_nchw<int16_t> },
266 {
"scale_S16_NCHW_NEAREST_NEIGHBOUR", &NEScaleKernel::scale_nearest_nchw<int16_t> },
268 #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC 269 {
"scale_F16_NCHW_BILINEAR", &NEScaleKernel::scale_bilinear_nchw<float16_t> },
270 {
"scale_F16_NCHW_NEAREST_NEIGHBOUR", &NEScaleKernel::scale_nearest_nchw<float16_t> },
273 {
"scale_F32_NCHW_BILINEAR", &NEScaleKernel::scale_bilinear_nchw<float> },
274 {
"scale_F32_NCHW_NEAREST_NEIGHBOUR", &NEScaleKernel::scale_nearest_nchw<float> },
276 auto it = map_function.find(function_to_call);
277 if(it != map_function.end())
288 INEKernel::configure(win);
291 template <
typename T>
292 void NEScaleKernel::scale_nearest_nchw(
const Window &
window)
317 Iterator offsets(_offsets, win_off);
320 const auto offsets_ptr =
reinterpret_cast<const int32_t *
>(offsets.
ptr());
322 const int32_t offset_row = in_yi * in_stride_x;
323 *
reinterpret_cast<T *
>(out.
ptr()) = *(reinterpret_cast<const T *>(in.
ptr()) + offsets_ptr[0] + offset_row);
328 template <
typename T>
329 void NEScaleKernel::scale_bilinear_nchw(
const Window &window)
350 Iterator offsets(_offsets, win_off);
360 #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC 361 using ConstType =
typename std::conditional<std::is_same<T, float16_t>::value,
half, T>
::type;
365 const T const_border_value =
static_cast<T
>(_constant_border_value.
get<ConstType>());
368 const int32_t index_h = std::floor((
id.y() + _sampling_offset) * hr - _sampling_offset);
369 const auto index_w = *(
reinterpret_cast<const int32_t *
>(offsets.
ptr()));
370 const auto dx_val = *(
reinterpret_cast<const float *
>(dx.
ptr()));
371 const auto dy_val = *(
reinterpret_cast<const float *
>(dy.
ptr()));
372 const auto pixel_row_ptr =
reinterpret_cast<const T *
>(in.
ptr());
374 const auto a00 = (0 <= index_w && index_w < in_dim_w && 0 <= index_h && index_h < in_dim_h) ? (*(pixel_row_ptr + index_w + index_h * in_stride_w)) : const_border_value;
375 const auto a01 = (-1 <= index_w && index_w < in_dim_w - 1 && 0 <= index_h && index_h < in_dim_h) ? (*(pixel_row_ptr + index_w + 1 + index_h * in_stride_w)) : const_border_value;
376 const auto a10 = (0 <= index_w && index_w < in_dim_w && -1 <= index_h
377 && index_h < in_dim_h - 1) ?
378 (*(pixel_row_ptr + index_w + index_h * in_stride_w + in_stride_w)) :
380 const auto a11 = (-1 <= index_w && index_w < in_dim_w - 1 && -1 <= index_h
381 && index_h < in_dim_h - 1) ?
382 (*(pixel_row_ptr + index_w + 1 + index_h * in_stride_w + in_stride_w)) :
387 in, offsets, dx, dy, out);
393 const int index_h = std::floor((
id.y() + _sampling_offset) * hr - _sampling_offset);
394 const auto index_w = *(
reinterpret_cast<const int32_t *
>(offsets.
ptr()));
395 const auto dx_val = *(
reinterpret_cast<const float *
>(dx.
ptr()));
396 const auto dy_val = *(
reinterpret_cast<const float *
>(dy.
ptr()));
397 const auto pixel_row_ptr =
reinterpret_cast<const T *
>(in.
ptr());
399 auto clamped_x = utility::clamp<int>(index_w, 0, in_dim_w - 1);
400 auto clamped_x1 = utility::clamp<int>(index_w + 1, 0, in_dim_w - 1);
401 auto clamped_y = utility::clamp<int>(index_h, 0, in_dim_h - 1);
402 auto clamped_y1 = utility::clamp<int>(index_h + 1, 0, in_dim_h - 1);
404 const auto a00 = *(pixel_row_ptr + clamped_x + clamped_y * in_stride_w);
405 const auto a01 = *(pixel_row_ptr + clamped_x1 + clamped_y * in_stride_w);
406 const auto a10 = *(pixel_row_ptr + clamped_x + clamped_y1 * in_stride_w);
407 const auto a11 = *(pixel_row_ptr + clamped_x1 + clamped_y1 * in_stride_w);
411 in, offsets, dx, dy, out);
419 void NEScaleKernel::scale_area_nchw_u8(
const Window &window)
421 using namespace scale_helpers;
437 const auto w = _input->info()->dimension(0);
438 const auto h = _input->info()->dimension(1);
439 const size_t in_stride = _input->info()->strides_in_bytes()[1];
443 const auto in_ptr =
reinterpret_cast<const uint8_t *
>(in.
ptr());
445 uint8x8_t tmp0 = vdup_n_u8(0);
446 tmp0 = vset_lane_u8(
pixel_area_c1u8_clamp(in_ptr, in_stride, w, h, wr, hr,
id.x(),
id.y()), tmp0, 0);
447 tmp0 = vset_lane_u8(
pixel_area_c1u8_clamp(in_ptr, in_stride, w, h, wr, hr,
id.x() + 1,
id.y()), tmp0, 1);
448 tmp0 = vset_lane_u8(
pixel_area_c1u8_clamp(in_ptr, in_stride, w, h, wr, hr,
id.x() + 2,
id.y()), tmp0, 2);
449 tmp0 = vset_lane_u8(
pixel_area_c1u8_clamp(in_ptr, in_stride, w, h, wr, hr,
id.x() + 3,
id.y()), tmp0, 3);
450 tmp0 = vset_lane_u8(
pixel_area_c1u8_clamp(in_ptr, in_stride, w, h, wr, hr,
id.x() + 4,
id.y()), tmp0, 4);
451 tmp0 = vset_lane_u8(
pixel_area_c1u8_clamp(in_ptr, in_stride, w, h, wr, hr,
id.x() + 5,
id.y()), tmp0, 5);
452 tmp0 = vset_lane_u8(
pixel_area_c1u8_clamp(in_ptr, in_stride, w, h, wr, hr,
id.x() + 6,
id.y()), tmp0, 6);
453 tmp0 = vset_lane_u8(
pixel_area_c1u8_clamp(in_ptr, in_stride, w, h, wr, hr,
id.x() + 7,
id.y()), tmp0, 7);
455 uint8x8_t tmp1 = vdup_n_u8(0);
456 tmp1 = vset_lane_u8(
pixel_area_c1u8_clamp(in_ptr, in_stride, w, h, wr, hr,
id.x() + 8,
id.y()), tmp1, 0);
457 tmp1 = vset_lane_u8(
pixel_area_c1u8_clamp(in_ptr, in_stride, w, h, wr, hr,
id.x() + 9,
id.y()), tmp1, 1);
458 tmp1 = vset_lane_u8(
pixel_area_c1u8_clamp(in_ptr, in_stride, w, h, wr, hr,
id.x() + 10,
id.y()), tmp1, 2);
459 tmp1 = vset_lane_u8(
pixel_area_c1u8_clamp(in_ptr, in_stride, w, h, wr, hr,
id.x() + 11,
id.y()), tmp1, 3);
460 tmp1 = vset_lane_u8(
pixel_area_c1u8_clamp(in_ptr, in_stride, w, h, wr, hr,
id.x() + 12,
id.y()), tmp1, 4);
461 tmp1 = vset_lane_u8(
pixel_area_c1u8_clamp(in_ptr, in_stride, w, h, wr, hr,
id.x() + 13,
id.y()), tmp1, 5);
462 tmp1 = vset_lane_u8(
pixel_area_c1u8_clamp(in_ptr, in_stride, w, h, wr, hr,
id.x() + 14,
id.y()), tmp1, 6);
463 tmp1 = vset_lane_u8(
pixel_area_c1u8_clamp(in_ptr, in_stride, w, h, wr, hr,
id.x() + 15,
id.y()), tmp1, 7);
465 vst1q_u8(out.
ptr(), vcombine_u8(tmp0, tmp1));
470 template <
typename T>
471 void NEScaleKernel::scale_bilinear_qasymm(
const Window &window)
489 for(
size_t d =
Window::DimZ; d < _offsets->info()->num_dimensions(); ++d)
497 const int32_t in_dim_w = _input->info()->dimension(idx_width);
498 const int32_t in_dim_h = _input->info()->dimension(idx_height);
499 const int32_t stride_w = _input->info()->strides_in_bytes()[
idx_width];
500 const int32_t stride_h = _input->info()->strides_in_bytes()[
idx_height];
507 #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC 508 using ConstType =
typename std::conditional<std::is_same<T, float16_t>::value,
half, T>
::type;
512 const T const_border_value =
static_cast<T
>(_constant_border_value.get<ConstType>());
515 const int32_t index_h = std::floor((
id[idx_height] + _sampling_offset) * hr - _sampling_offset);
516 const int32_t index_w = *(
reinterpret_cast<const int32_t *
>(_offsets->ptr_to_element(
Coordinates(
id[idx_width],
id[idx_height]))));
517 const auto dx_val = *(
reinterpret_cast<const float *
>(_dx->ptr_to_element(
Coordinates(
id[idx_width],
id[idx_height]))));
518 const auto dy_val = *(
reinterpret_cast<const float *
>(_dy->ptr_to_element(
Coordinates(
id[idx_width],
id[idx_height]))));
519 const auto pixel_row_ptr =
reinterpret_cast<const T *
>(in.
ptr());
521 const auto a00 = (0 <= index_w && index_w < in_dim_w && 0 <= index_h && index_h < in_dim_h) ?
522 (*(pixel_row_ptr + index_w * stride_w + index_h * stride_h)) :
524 const auto a01 = (-1 <= index_w && index_w < in_dim_w - 1 && 0 <= index_h && index_h < in_dim_h) ?
525 (*(pixel_row_ptr + (index_w + 1) * stride_w + index_h * stride_h)) :
527 const auto a10 = (0 <= index_w && index_w < in_dim_w && -1 <= index_h && index_h < in_dim_h - 1) ?
528 (*(pixel_row_ptr + index_w * stride_w + (index_h + 1) * stride_h)) :
530 const auto a11 = (-1 <= index_w && index_w < in_dim_w - 1 && -1 <= index_h && index_h < in_dim_h - 1) ?
531 (*(pixel_row_ptr + (index_w + 1) * stride_w + (index_h + 1) * stride_h)) :
546 const int index_h = std::floor((
id[idx_height] + _sampling_offset) * hr - _sampling_offset);
547 const int32_t index_w = *(
reinterpret_cast<const int32_t *
>(_offsets->ptr_to_element(
Coordinates(
id[idx_width],
id[idx_height]))));
548 const auto dx_val = *(
reinterpret_cast<const float *
>(_dx->ptr_to_element(
Coordinates(
id[idx_width],
id[idx_height]))));
549 const auto dy_val = *(
reinterpret_cast<const float *
>(_dy->ptr_to_element(
Coordinates(
id[idx_width],
id[idx_height]))));
550 const auto pixel_row_ptr =
reinterpret_cast<const T *
>(in.
ptr());
552 auto clamped_w = utility::clamp<int>(index_w, 0, in_dim_w - 1);
553 auto clamped_w1 = utility::clamp<int>(index_w + 1, 0, in_dim_w - 1);
554 auto clamped_h = utility::clamp<int>(index_h, 0, in_dim_h - 1);
555 auto clamped_h1 = utility::clamp<int>(index_h + 1, 0, in_dim_h - 1);
557 const auto a00 = *(pixel_row_ptr + clamped_w * stride_w + clamped_h * stride_h);
558 const auto a01 = *(pixel_row_ptr + clamped_w1 * stride_w + clamped_h * stride_h);
559 const auto a10 = *(pixel_row_ptr + clamped_w * stride_w + clamped_h1 * stride_h);
560 const auto a11 = *(pixel_row_ptr + clamped_w1 * stride_w + clamped_h1 * stride_h);
592 (this->*_func)(window);
596 const auto *uk = get_implementation(ScaleSelectorData{ _input->info()->data_type() });
597 uk->ukernel(_input, _output, _offsets, _dx, _dy, _policy, _border_mode, _constant_border_value, _sampling_offset, _align_corners, window);
BorderMode
Methods available to handle borders.
void s16_sve_scale(const ITensor *src, ITensor *dst, const ITensor *offsets, const ITensor *dx, const ITensor *dy, InterpolationPolicy policy, BorderMode border_mode, PixelValue constant_border_value, float sampling_offset, bool align_corners, const Window &window)
BorderMode border_mode
Border mode policy.
virtual size_t num_dimensions() const =0
The number of dimensions of the tensor (rank)
Class describing the value of a pixel for any image format.
Window calculate_max_window(const ValidRegion &valid_region, const Steps &steps, bool skip_border, BorderSize border_size)
InterpolationPolicy
Interpolation method.
const Window & window() const
The maximum window the kernel can be executed on.
void qasymm8_sve_scale(const ITensor *src, ITensor *dst, const ITensor *offsets, const ITensor *dx, const ITensor *dy, InterpolationPolicy policy, BorderMode border_mode, PixelValue constant_border_value, float sampling_offset, bool align_corners, const Window &window)
InterpolationPolicy interpolation_policy
Interpolation type to use.
#define REGISTER_FP16_NEON(func_name)
virtual size_t dimension(size_t index) const =0
Return the size of the requested dimension.
void run(const Window &window, const ThreadInfo &info) override
Execute the kernel on the passed window.
#define ARM_COMPUTE_ERROR(msg)
Print the given message then throw an std::runtime_error.
1 channel, 1 U8 per channel
#define REGISTER_FP32_NEON(func_name)
void get(uint8_t &v) const
Interpret the pixel value as a U8.
void qasymm8_signed_sve_scale(const ITensor *src, ITensor *dst, const ITensor *offsets, const ITensor *dx, const ITensor *dy, InterpolationPolicy policy, BorderMode border_mode, PixelValue constant_border_value, float sampling_offset, bool align_corners, const Window &window)
#define ARM_COMPUTE_RETURN_ON_ERROR(status)
Checks if a status contains an error and returns it.
virtual DataType data_type() const =0
Data type used for each element of the tensor.
half_float::half half
16-bit floating point type
1 channel, 1 F32 per channel
Output values are defined by bilinear interpolation between the pixels.
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
#define REGISTER_QASYMM8_SIGNED_NEON(func_name)
const DataLayout data_layout
bool align_corners
Align corners of input and output.
Store the tensor's metadata.
#define ARM_COMPUTE_ERROR_THROW_ON(status)
void fp16_sve_scale(const ITensor *src, ITensor *dst, const ITensor *offsets, const ITensor *dx, const ITensor *dy, InterpolationPolicy policy, BorderMode border_mode, PixelValue constant_border_value, float sampling_offset, bool align_corners, const Window &window)
Describe one of the image's dimensions with a start, end and step.
T round_half_away_from_zero(T value)
Round floating-point value with half value rounding away from zero.
Output values are defined to match the source pixel whose center is nearest to the sample position.
#define ARM_COMPUTE_RETURN_ERROR_ON(cond)
If the condition is true, an error is returned.
decltype(strategy::transforms) typedef type
Interface for Neon tensor.
Copyright (c) 2017-2021 Arm Limited.
virtual void set_valid_region(const ValidRegion &valid_region)=0
Set the valid region of the tensor.
1 channel, 1 F16 per channel
Samples are taken at pixel center.
bool is_align_corners_allowed_sampling_policy(SamplingPolicy sampling_policy)
Returns if aligned corners are allowed for the given sampling policy.
#define REGISTER_INTEGER_NEON(func_name)
#define ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(...)
1 channel, 1 S32 per channel
static Status validate(const ITensorInfo *input, const ITensorInfo *dx, const ITensorInfo *dy, const ITensorInfo *offsets, ITensorInfo *output, const ScaleKernelInfo &info)
Static function to check if given info will lead to a valid configuration of NEScaleKernel.
const std::string & string_from_data_type(DataType dt)
Convert a data type identity into a string.
void u8_sve_scale(const ITensor *src, ITensor *dst, const ITensor *offsets, const ITensor *dx, const ITensor *dy, InterpolationPolicy policy, BorderMode border_mode, PixelValue constant_border_value, float sampling_offset, bool align_corners, const Window &window)
static constexpr size_t DimX
Alias for dimension 0 also known as X dimension.
#define ARM_COMPUTE_UNUSED(...)
To avoid unused variables warnings.
const ScaleSelectorPtr is_selected
#define REGISTER_QASYMM8_NEON(func_name)
SamplingPolicy sampling_policy
Sampling policy used by the interpolation.
virtual const TensorShape & tensor_shape() const =0
Size for each dimension of the tensor.
quantized, asymmetric fixed-point 8-bit number unsigned
Class to describe a number of elements in each dimension.
uint8_t pixel_area_c1u8_clamp(const uint8_t *first_pixel_ptr, size_t stride, size_t width, size_t height, float wr, float hr, int x, int y)
Return the pixel at (x,y) using area interpolation by clamping when out of borders.
void qasymm8_signed_neon_scale(const ITensor *src, ITensor *dst, const ITensor *offsets, const ITensor *dx, const ITensor *dy, InterpolationPolicy policy, BorderMode border_mode, PixelValue constant_border_value, float sampling_offset, bool align_corners, const Window &window)
const std::string & string_from_interpolation_policy(InterpolationPolicy policy)
Translates a given interpolation policy to a string.
void fp32_sve_scale(const ITensor *src, ITensor *dst, const ITensor *offsets, const ITensor *dx, const ITensor *dy, InterpolationPolicy policy, BorderMode border_mode, PixelValue constant_border_value, float sampling_offset, bool align_corners, const Window &window)
virtual ITensorInfo * info() const =0
Interface to be implemented by the child class to return the tensor's metadata.
Samples are taken at pixel top left corner.
constexpr uint8_t * ptr() const
Return a pointer to the current pixel.
void configure(const ITensor *input, const ITensor *dx, const ITensor *dy, const ITensor *offsets, ITensor *output, const ScaleKernelInfo &info)
Initialise the kernel's inputs, output and interpolation policy.
void set(size_t dimension, const Dimension &dim)
Set the values of a given dimension.
virtual PaddingSize padding() const =0
Padding of tensor.
unsigned int left
left of the border
unsigned int right
right of the border
#define ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(k)
1 channel, 1 S16 per channel
Output values are determined by averaging the source pixels whose areas fall under the area of the destination pixel, projected onto the source image.
Num samples, channels, height, width.
#define ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(t, c,...)
static constexpr size_t DimY
Alias for dimension 1 also known as Y dimension.
ScaleKernelInfo info(interpolation_policy, default_border_mode, PixelValue(), sampling_policy, false)
const std::string & string_from_data_layout(DataLayout dl)
Convert a data layout identity into a string.
Information about executing thread and CPU.
static constexpr size_t DimZ
Alias for dimension 2 also known as Z dimension.
Borders are left undefined.
Pixels outside the image are assumed to have the same value as the closest image pixel.
#define ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(...)
constexpr const Dimension & y() const
Alias to access the second dimension of the window.
#define ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(t, c,...)
Status validate_arguments(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output, const GEMMLowpOutputStageInfo *output_stage)
#define ARM_COMPUTE_RETURN_ERROR_ON_MSG(cond, msg)
If the condition is true, an error is returned.
static float dequantize(QUANTIZED_TYPE value, const UniformQuantizationInfo &qinfo)
Dequantize a value given a 8-bit asymmetric quantization scheme.
#define ARM_COMPUTE_ERROR_ON_NULLPTR(...)
PixelValue constant_border_value
Constant value to use for constant border mode policy.
void execute_window_loop(const Window &w, L &&lambda_function, Ts &&... iterators)
Iterate through the passed window, automatically adjusting the iterators and calling the lambda_function for each element.
float calculate_resize_ratio(size_t input_size, size_t output_size, bool align_corners=false)
Returns resize ratio between input and output with consideration of aligned corners.
void set_num_dimensions(size_t num_dimensions)
Set number of dimensions.
quantized, asymmetric fixed-point 8-bit number signed
Includes all wrapper headers at once.
Container for valid region of a window.
size_t get_data_layout_dimension_index(const DataLayout data_layout, const DataLayoutDimension data_layout_dimension)
Get the index of the given dimension.
DataLayout data_layout
Data layout to use.
Iterator updated by execute_window_loop for each window element.
float delta_bilinear(float a00, float a01, float a10, float a11, float dx_val, float dy_val)
Computes bilinear interpolation using the top-left, top-right, bottom-left, bottom-right pixels and t...
DataType
Available data types.
DataLayout
[DataLayout enum definition]
void qasymm8_neon_scale(const ITensor *src, ITensor *dst, const ITensor *offsets, const ITensor *dx, const ITensor *dy, InterpolationPolicy policy, BorderMode border_mode, PixelValue constant_border_value, float sampling_offset, bool align_corners, const Window &window)
Describe a multidimensional execution window.
#define ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(f, s)
NEScaleKernel()
Default constructor.
virtual DataLayout data_layout() const =0
Get the data layout of the tensor.
constexpr const Dimension & x() const
Alias to access the first dimension of the window.