51 static const std::vector<CpuScaleKernel::ScaleKernel> available_kernels =
53 #if defined(ARM_COMPUTE_ENABLE_SVE) 56 [](
const DataTypeISASelectorData & data) {
return data.dt ==
DataType::F16 && data.isa.sve; },
61 [](
const DataTypeISASelectorData & data) {
return data.dt ==
DataType::F32 && data.isa.sve; },
66 [](
const DataTypeISASelectorData & data) {
return data.dt ==
DataType::QASYMM8 && data.isa.sve; },
76 [](
const DataTypeISASelectorData & data) {
return data.dt ==
DataType::U8 && data.isa.sve; },
81 [](
const DataTypeISASelectorData & data) {
return data.dt ==
DataType::S16 && data.isa.sve; },
85 #if defined(ARM_COMPUTE_ENABLE_NEON) 86 #if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) 89 [](
const DataTypeISASelectorData & data) {
return data.dt ==
DataType::F16 && data.isa.fp16; },
95 [](
const DataTypeISASelectorData & data) {
return data.dt ==
DataType::F32; },
100 [](
const DataTypeISASelectorData & data) {
return data.dt ==
DataType::QASYMM8; },
110 [](
const DataTypeISASelectorData & data) {
return data.dt ==
DataType::U8; },
115 [](
const DataTypeISASelectorData & data) {
return data.dt ==
DataType::S16; },
121 Status validate_arguments(
const ITensorInfo *
src,
const ITensorInfo *dx,
const ITensorInfo *dy,
122 const ITensorInfo *offsets, ITensorInfo *
dst,
const ScaleKernelInfo &
info)
138 const auto output_width = dst->dimension(width_index);
139 const auto output_height = dst->dimension(height_index);
151 if(dx !=
nullptr && dy !=
nullptr)
186 _run_method = uk->ukernel;
201 _sampling_offset = 0.5f;
217 #ifdef ENABLE_NCHW_KERNELS 221 std::string function_to_call(
"scale_");
226 static std::map<std::string, ScaleFunctionPtr> map_function =
228 {
"scale_U8_NCHW_AREA_CONSTANT", &CpuScaleKernel::scale_area_nchw_u8 },
230 {
"scale_U8_NCHW_BILINEAR", &CpuScaleKernel::scale_bilinear_nchw<uint8_t> },
231 {
"scale_U8_NCHW_NEAREST_NEIGHBOUR", &CpuScaleKernel::scale_nearest_nchw<uint8_t> },
233 {
"scale_QASYMM8_NCHW_BILINEAR", &CpuScaleKernel::scale_bilinear_qasymm<uint8_t> },
234 {
"scale_QASYMM8_NCHW_NEAREST_NEIGHBOUR", &CpuScaleKernel::scale_nearest_nchw<uint8_t> },
236 {
"scale_QASYMM8_SIGNED_NCHW_BILINEAR", &CpuScaleKernel::scale_bilinear_qasymm<int8_t> },
237 {
"scale_QASYMM8_SIGNED_NCHW_NEAREST_NEIGHBOUR", &CpuScaleKernel::scale_nearest_nchw<int8_t> },
239 {
"scale_S16_NCHW_BILINEAR", &CpuScaleKernel::scale_bilinear_nchw<int16_t> },
240 {
"scale_S16_NCHW_NEAREST_NEIGHBOUR", &CpuScaleKernel::scale_nearest_nchw<int16_t> },
242 #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC 243 {
"scale_F16_NCHW_BILINEAR", &CpuScaleKernel::scale_bilinear_nchw<float16_t> },
244 {
"scale_F16_NCHW_NEAREST_NEIGHBOUR", &CpuScaleKernel::scale_nearest_nchw<float16_t> },
247 {
"scale_F32_NCHW_BILINEAR", &CpuScaleKernel::scale_bilinear_nchw<float> },
248 {
"scale_F32_NCHW_NEAREST_NEIGHBOUR", &CpuScaleKernel::scale_nearest_nchw<float> },
250 auto it = map_function.find(function_to_call);
251 if(it != map_function.end())
256 #endif // ENABLE_NCHW_KERNELS 260 ICpuKernel::configure(win);
263 #ifdef ENABLE_NCHW_KERNELS 264 template <
typename T>
291 Iterator offsets_i(offsets, win_off);
294 const auto offsets_ptr =
reinterpret_cast<const int32_t *
>(offsets_i.
ptr());
296 id.y() + _sampling_offset)
298 const int32_t offset_row = in_yi * in_stride_x;
299 *
reinterpret_cast<T *
>(dst_i.
ptr()) = *(reinterpret_cast<const T *>(src_i.
ptr()) + offsets_ptr[0] + offset_row);
301 src_i, offsets_i, dst_i);
304 template <
typename T>
326 Iterator offsets_i(offsets, win_off);
336 #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC 337 using ConstType =
typename std::conditional<std::is_same<T, float16_t>::value,
half, T>
::type;
341 const T const_border_value =
static_cast<T
>(_constant_border_value.
get<ConstType>());
344 const int32_t index_h = std::floor((
id.y() + _sampling_offset) * hr - _sampling_offset);
345 const auto index_w = *(
reinterpret_cast<const int32_t *
>(offsets_i.
ptr()));
346 const auto dx_val = *(
reinterpret_cast<const float *
>(dx_i.
ptr()));
347 const auto dy_val = *(
reinterpret_cast<const float *
>(dy_i.
ptr()));
348 const auto pixel_row_ptr =
reinterpret_cast<const T *
>(src_i.
ptr());
350 const auto a00 = (0 <= index_w && index_w < in_dim_w && 0 <= index_h && index_h < in_dim_h) ? (*(pixel_row_ptr + index_w + index_h * in_stride_w)) : const_border_value;
351 const auto a01 = (-1 <= index_w && index_w < in_dim_w - 1 && 0 <= index_h && index_h < in_dim_h) ? (*(pixel_row_ptr + index_w + 1 + index_h * in_stride_w)) : const_border_value;
352 const auto a10 = (0 <= index_w && index_w < in_dim_w && -1 <= index_h
353 && index_h < in_dim_h - 1) ?
354 (*(pixel_row_ptr + index_w + index_h * in_stride_w + in_stride_w)) :
356 const auto a11 = (-1 <= index_w && index_w < in_dim_w - 1 && -1 <= index_h
357 && index_h < in_dim_h - 1) ?
358 (*(pixel_row_ptr + index_w + 1 + index_h * in_stride_w + in_stride_w)) :
363 src_i, offsets_i, dx_i, dy_i, dst_i);
369 const int index_h = std::floor((
id.y() + _sampling_offset) * hr - _sampling_offset);
370 const auto index_w = *(
reinterpret_cast<const int32_t *
>(offsets_i.
ptr()));
371 const auto dx_val = *(
reinterpret_cast<const float *
>(dx_i.
ptr()));
372 const auto dy_val = *(
reinterpret_cast<const float *
>(dy_i.
ptr()));
373 const auto pixel_row_ptr =
reinterpret_cast<const T *
>(src_i.
ptr());
375 auto clamped_x = utility::clamp<int>(index_w, 0, in_dim_w - 1);
376 auto clamped_x1 = utility::clamp<int>(index_w + 1, 0, in_dim_w - 1);
377 auto clamped_y = utility::clamp<int>(index_h, 0, in_dim_h - 1);
378 auto clamped_y1 = utility::clamp<int>(index_h + 1, 0, in_dim_h - 1);
380 const auto a00 = *(pixel_row_ptr + clamped_x + clamped_y * in_stride_w);
381 const auto a01 = *(pixel_row_ptr + clamped_x1 + clamped_y * in_stride_w);
382 const auto a10 = *(pixel_row_ptr + clamped_x + clamped_y1 * in_stride_w);
383 const auto a11 = *(pixel_row_ptr + clamped_x1 + clamped_y1 * in_stride_w);
387 src_i, offsets_i, dx_i, dy_i, dst_i);
398 using namespace scale_helpers;
420 const auto in_ptr =
reinterpret_cast<const uint8_t *
>(src_i.
ptr());
422 uint8x8_t tmp0 = vdup_n_u8(0);
423 tmp0 = vset_lane_u8(
pixel_area_c1u8_clamp(in_ptr, in_stride, w, h, wr, hr,
id.x(),
id.y()), tmp0, 0);
424 tmp0 = vset_lane_u8(
pixel_area_c1u8_clamp(in_ptr, in_stride, w, h, wr, hr,
id.x() + 1,
id.y()), tmp0, 1);
425 tmp0 = vset_lane_u8(
pixel_area_c1u8_clamp(in_ptr, in_stride, w, h, wr, hr,
id.x() + 2,
id.y()), tmp0, 2);
426 tmp0 = vset_lane_u8(
pixel_area_c1u8_clamp(in_ptr, in_stride, w, h, wr, hr,
id.x() + 3,
id.y()), tmp0, 3);
427 tmp0 = vset_lane_u8(
pixel_area_c1u8_clamp(in_ptr, in_stride, w, h, wr, hr,
id.x() + 4,
id.y()), tmp0, 4);
428 tmp0 = vset_lane_u8(
pixel_area_c1u8_clamp(in_ptr, in_stride, w, h, wr, hr,
id.x() + 5,
id.y()), tmp0, 5);
429 tmp0 = vset_lane_u8(
pixel_area_c1u8_clamp(in_ptr, in_stride, w, h, wr, hr,
id.x() + 6,
id.y()), tmp0, 6);
430 tmp0 = vset_lane_u8(
pixel_area_c1u8_clamp(in_ptr, in_stride, w, h, wr, hr,
id.x() + 7,
id.y()), tmp0, 7);
432 uint8x8_t tmp1 = vdup_n_u8(0);
433 tmp1 = vset_lane_u8(
pixel_area_c1u8_clamp(in_ptr, in_stride, w, h, wr, hr,
id.x() + 8,
id.y()), tmp1, 0);
434 tmp1 = vset_lane_u8(
pixel_area_c1u8_clamp(in_ptr, in_stride, w, h, wr, hr,
id.x() + 9,
id.y()), tmp1, 1);
435 tmp1 = vset_lane_u8(
pixel_area_c1u8_clamp(in_ptr, in_stride, w, h, wr, hr,
id.x() + 10,
id.y()), tmp1, 2);
436 tmp1 = vset_lane_u8(
pixel_area_c1u8_clamp(in_ptr, in_stride, w, h, wr, hr,
id.x() + 11,
id.y()), tmp1, 3);
437 tmp1 = vset_lane_u8(
pixel_area_c1u8_clamp(in_ptr, in_stride, w, h, wr, hr,
id.x() + 12,
id.y()), tmp1, 4);
438 tmp1 = vset_lane_u8(
pixel_area_c1u8_clamp(in_ptr, in_stride, w, h, wr, hr,
id.x() + 13,
id.y()), tmp1, 5);
439 tmp1 = vset_lane_u8(
pixel_area_c1u8_clamp(in_ptr, in_stride, w, h, wr, hr,
id.x() + 14,
id.y()), tmp1, 6);
440 tmp1 = vset_lane_u8(
pixel_area_c1u8_clamp(in_ptr, in_stride, w, h, wr, hr,
id.x() + 15,
id.y()), tmp1, 7);
442 vst1q_u8(dst_i.
ptr(), vcombine_u8(tmp0, tmp1));
447 template <
typename T>
484 #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC 485 using ConstType =
typename std::conditional<std::is_same<T, float16_t>::value,
half, T>
::type;
489 const T const_border_value =
static_cast<T
>(_constant_border_value.get<ConstType>());
492 const int32_t index_h = std::floor((
id[idx_height] + _sampling_offset) * hr - _sampling_offset);
493 const int32_t index_w = *(
reinterpret_cast<const int32_t *
>(offsets->
ptr_to_element(
Coordinates(
id[idx_width],
id[idx_height]))));
496 const auto pixel_row_ptr =
reinterpret_cast<const T *
>(src_i.
ptr());
498 const auto a00 = (0 <= index_w && index_w < in_dim_w && 0 <= index_h && index_h < in_dim_h) ?
499 (*(pixel_row_ptr + index_w * stride_w + index_h * stride_h)) :
501 const auto a01 = (-1 <= index_w && index_w < in_dim_w - 1 && 0 <= index_h && index_h < in_dim_h) ?
502 (*(pixel_row_ptr + (index_w + 1) * stride_w + index_h * stride_h)) :
504 const auto a10 = (0 <= index_w && index_w < in_dim_w && -1 <= index_h && index_h < in_dim_h - 1) ?
505 (*(pixel_row_ptr + index_w * stride_w + (index_h + 1) * stride_h)) :
507 const auto a11 = (-1 <= index_w && index_w < in_dim_w - 1 && -1 <= index_h && index_h < in_dim_h - 1) ?
508 (*(pixel_row_ptr + (index_w + 1) * stride_w + (index_h + 1) * stride_h)) :
523 const int index_h = std::floor((
id[idx_height] + _sampling_offset) * hr - _sampling_offset);
524 const int32_t index_w = *(
reinterpret_cast<const int32_t *
>(offsets->
ptr_to_element(
Coordinates(
id[idx_width],
id[idx_height]))));
527 const auto pixel_row_ptr =
reinterpret_cast<const T *
>(src_i.
ptr());
529 auto clamped_w = utility::clamp<int>(index_w, 0, in_dim_w - 1);
530 auto clamped_w1 = utility::clamp<int>(index_w + 1, 0, in_dim_w - 1);
531 auto clamped_h = utility::clamp<int>(index_h, 0, in_dim_h - 1);
532 auto clamped_h1 = utility::clamp<int>(index_h + 1, 0, in_dim_h - 1);
534 const auto a00 = *(pixel_row_ptr + clamped_w * stride_w + clamped_h * stride_h);
535 const auto a01 = *(pixel_row_ptr + clamped_w1 * stride_w + clamped_h * stride_h);
536 const auto a10 = *(pixel_row_ptr + clamped_w * stride_w + clamped_h1 * stride_h);
537 const auto a11 = *(pixel_row_ptr + clamped_w1 * stride_w + clamped_h1 * stride_h);
552 #endif // ENABLE_NCHW_KERNELS 577 (this->*_func)(src, dst, dx, dy, offsets, window);
581 _run_method(src, dst, offsets, dx, dy, _policy, _border_mode, _constant_border_value, _sampling_offset, _align_corners, window);
587 return _name.c_str();
592 return available_kernels;
void s16_sve_scale(const ITensor *src, ITensor *dst, const ITensor *offsets, const ITensor *dx, const ITensor *dy, InterpolationPolicy policy, BorderMode border_mode, PixelValue constant_border_value, float sampling_offset, bool align_corners, const Window &window)
BorderMode border_mode
Border mode policy.
virtual size_t num_dimensions() const =0
The number of dimensions of the tensor (rank)
Class describing the value of a pixel for any image format.
Window calculate_max_window(const ValidRegion &valid_region, const Steps &steps, bool skip_border, BorderSize border_size)
const Window & window() const
The maximum window the kernel can be executed on.
uint8_t * ptr_to_element(const Coordinates &id) const
Return a pointer to the element at the passed coordinates.
static const auto * get_implementation(const SelectorType &selector, KernelSelectionType selection_type=KernelSelectionType::Supported)
Micro-kernel selector.
void qasymm8_sve_scale(const ITensor *src, ITensor *dst, const ITensor *offsets, const ITensor *dx, const ITensor *dy, InterpolationPolicy policy, BorderMode border_mode, PixelValue constant_border_value, float sampling_offset, bool align_corners, const Window &window)
InterpolationPolicy interpolation_policy
Interpolation type to use.
#define REGISTER_FP16_NEON(func_name)
virtual size_t dimension(size_t index) const =0
Return the size of the requested dimension.
void run_op(ITensorPack &tensors, const Window &window, const ThreadInfo &info) override
Execute the kernel on the passed window.
#define ARM_COMPUTE_ERROR(msg)
Print the given message then throw an std::runtime_error.
1 channel, 1 U8 per channel
#define REGISTER_FP32_NEON(func_name)
void get(uint8_t &v) const
Interpret the pixel value as a U8.
void qasymm8_signed_sve_scale(const ITensor *src, ITensor *dst, const ITensor *offsets, const ITensor *dx, const ITensor *dy, InterpolationPolicy policy, BorderMode border_mode, PixelValue constant_border_value, float sampling_offset, bool align_corners, const Window &window)
#define ARM_COMPUTE_RETURN_ON_ERROR(status)
Checks if a status contains an error and returns it.
virtual DataType data_type() const =0
Data type used for each element of the tensor.
half_float::half half
16-bit floating point type
1 channel, 1 F32 per channel
#define REGISTER_FP32_SVE(func_name)
Output values are defined by bilinear interpolation between the pixels.
#define REGISTER_QASYMM8_SVE(func_name)
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
#define REGISTER_QASYMM8_SIGNED_NEON(func_name)
bool align_corners
Align corners of input and output.
Store the tensor's metadata.
#define ARM_COMPUTE_ERROR_THROW_ON(status)
void fp16_sve_scale(const ITensor *src, ITensor *dst, const ITensor *offsets, const ITensor *dx, const ITensor *dy, InterpolationPolicy policy, BorderMode border_mode, PixelValue constant_border_value, float sampling_offset, bool align_corners, const Window &window)
Describe one of the image's dimensions with a start, end and step.
T round_half_away_from_zero(T value)
Round floating-point value with half value rounding away from zero.
Output values are defined to match the source pixel whose center is nearest to the sample position.
#define ARM_COMPUTE_RETURN_ERROR_ON(cond)
If the condition is true, an error is returned.
decltype(strategy::transforms) typedef type
Interface for CPU tensor.
SimpleTensor< float > src
Copyright (c) 2017-2021 Arm Limited.
__kernel void scale_bilinear_nchw(__global uchar *in_ptr, uint in_stride_x, uint in_step_x, uint in_stride_y, uint in_step_y, uint in_offset_first_element_in_bytes, __global uchar *out_ptr, uint out_stride_x, uint out_step_x, uint out_stride_y, uint out_step_y, uint out_offset_first_element_in_bytes)
Performs an affine transformation on an image interpolating with the BILINEAR method.
1 channel, 1 F16 per channel
Samples are taken at pixel center.
bool is_align_corners_allowed_sampling_policy(SamplingPolicy sampling_policy)
Returns if aligned corners are allowed for the given sampling policy.
#define REGISTER_INTEGER_NEON(func_name)
#define ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(...)
#define REGISTER_QASYMM8_SIGNED_SVE(func_name)
1 channel, 1 S32 per channel
const ITensor * get_const_tensor(int id) const
Get constant tensor of a given id.
const std::string & string_from_data_type(DataType dt)
Convert a data type identity into a string.
void u8_sve_scale(const ITensor *src, ITensor *dst, const ITensor *offsets, const ITensor *dx, const ITensor *dy, InterpolationPolicy policy, BorderMode border_mode, PixelValue constant_border_value, float sampling_offset, bool align_corners, const Window &window)
static constexpr size_t DimX
Alias for dimension 0 also known as X dimension.
#define ARM_COMPUTE_UNUSED(...)
To avoid unused variables warnings.
#define REGISTER_QASYMM8_NEON(func_name)
SamplingPolicy sampling_policy
Sampling policy used by the interpolation.
quantized, asymmetric fixed-point 8-bit number unsigned
Class to describe a number of elements in each dimension.
uint8_t pixel_area_c1u8_clamp(const uint8_t *first_pixel_ptr, size_t stride, size_t width, size_t height, float wr, float hr, int x, int y)
Return the pixel at (x,y) using area interpolation by clamping when out of borders.
void qasymm8_signed_neon_scale(const ITensor *src, ITensor *dst, const ITensor *offsets, const ITensor *dx, const ITensor *dy, InterpolationPolicy policy, BorderMode border_mode, PixelValue constant_border_value, float sampling_offset, bool align_corners, const Window &window)
const std::string & string_from_interpolation_policy(InterpolationPolicy policy)
Translates a given interpolation policy to a string.
void fp32_sve_scale(const ITensor *src, ITensor *dst, const ITensor *offsets, const ITensor *dx, const ITensor *dy, InterpolationPolicy policy, BorderMode border_mode, PixelValue constant_border_value, float sampling_offset, bool align_corners, const Window &window)
UniformQuantizationInfo uniform() const
Return per layer quantization info.
#define REGISTER_INTEGER_SVE(func_name)
virtual ITensorInfo * info() const =0
Interface to be implemented by the child class to return the tensor's metadata.
Samples are taken at pixel top left corner.
constexpr uint8_t * ptr() const
Return a pointer to the current pixel.
void set(size_t dimension, const Dimension &dim)
Set the values of a given dimension.
virtual PaddingSize padding() const =0
Padding of tensor.
unsigned int left
left of the border
virtual QuantizationInfo quantization_info() const =0
Get the quantization settings (scale and offset) of the tensor.
unsigned int right
right of the border
#define ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(k)
1 channel, 1 S16 per channel
Output values are determined by averaging the source pixels whose areas fall under the area of the destination pixel, projected onto the source image.
void u8_neon_scale(const ITensor *src, ITensor *dst, const ITensor *offsets, const ITensor *dx, const ITensor *dy, InterpolationPolicy policy, BorderMode border_mode, PixelValue constant_border_value, float sampling_offset, bool align_corners, const Window &window)
Num samples, channels, height, width.
#define ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(t, c,...)
static constexpr size_t DimY
Alias for dimension 1 also known as Y dimension.
ScaleKernelInfo info(interpolation_policy, default_border_mode, PixelValue(), sampling_policy, false)
ITensor * get_tensor(int id)
Get tensor of a given id from the pack.
const std::string & string_from_data_layout(DataLayout dl)
Convert a data layout identity into a string.
Information about executing thread and CPU.
#define REGISTER_FP16_SVE(func_name)
static constexpr size_t DimZ
Alias for dimension 2 also known as Z dimension.
Borders are left undefined.
size_t get_data_layout_dimension_index(const DataLayout &data_layout, const DataLayoutDimension &data_layout_dimension)
Get the index of the given dimension.
Pixels outside the image are assumed to have the same value as the closest image pixel.
#define ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(...)
Num samples, height, width, channels.
constexpr const Dimension & y() const
Alias to access the second dimension of the window.
#define ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(t, c,...)
void s16_neon_scale(const ITensor *src, ITensor *dst, const ITensor *offsets, const ITensor *dx, const ITensor *dy, InterpolationPolicy policy, BorderMode border_mode, PixelValue constant_border_value, float sampling_offset, bool align_corners, const Window &window)
void configure(const ITensorInfo *src, const ITensorInfo *dx, const ITensorInfo *dy, const ITensorInfo *offsets, ITensorInfo *dst, const ScaleKernelInfo &info)
Initialise the kernel's inputs, output and interpolation policy.
static Status validate(const ITensorInfo *src, const ITensorInfo *dx, const ITensorInfo *dy, const ITensorInfo *offsets, ITensorInfo *dst, const ScaleKernelInfo &info)
Static function to check if given info will lead to a valid configuration.
#define ARM_COMPUTE_RETURN_ERROR_ON_MSG(cond, msg)
If the condition is true, an error is returned.
static float dequantize(QUANTIZED_TYPE value, const UniformQuantizationInfo &qinfo)
Dequantize a value given a 8-bit asymmetric quantization scheme.
#define ARM_COMPUTE_ERROR_ON_NULLPTR(...)
PixelValue constant_border_value
Constant value to use for constant border mode policy.
void execute_window_loop(const Window &w, L &&lambda_function, Ts &&... iterators)
Iterate through the passed window, automatically adjusting the iterators and calling the lambda_function for each element.
float calculate_resize_ratio(size_t input_size, size_t output_size, bool align_corners=false)
Returns resize ratio between input and output with consideration of aligned corners.
quantized, asymmetric fixed-point 8-bit number signed
Includes all wrapper headers at once.
virtual const Strides & strides_in_bytes() const =0
The strides in bytes for accessing each dimension of the tensor.
static CPUInfo & get()
Access the CPUInfo singleton.
static const std::vector< ScaleKernel > & get_available_kernels()
DataLayout data_layout
Data layout to use.
Iterator updated by execute_window_loop for each window element.
float delta_bilinear(float a00, float a01, float a10, float a11, float dx_val, float dy_val)
Computes bilinear interpolation using the top-left, top-right, bottom-left, bottom-right pixels and t...
DataLayout
[DataLayout enum definition]
const char * name() const override
Name of the kernel.
void qasymm8_neon_scale(const ITensor *src, ITensor *dst, const ITensor *offsets, const ITensor *dx, const ITensor *dy, InterpolationPolicy policy, BorderMode border_mode, PixelValue constant_border_value, float sampling_offset, bool align_corners, const Window &window)
Describe a multidimensional execution window.
#define ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(f, s)
cpuinfo::CpuIsaInfo get_isa() const
Gets the current cpu's ISA information.
virtual DataLayout data_layout() const =0
Get the data layout of the tensor.
constexpr const Dimension & x() const
Alias to access the first dimension of the window.