/** Compute the (unscaled) 3x3 box-filter sum for 8 output pixels.
 *
 * Each input register holds 16 consecutive u8 pixels of one row of the
 * 3x3 neighbourhood. The pixels are widened to s16 so the 9-tap sum of
 * u8 values (max 9 * 255 = 2295) cannot overflow, then the nine taps are
 * accumulated. Scaling by 1/9 is done by the caller.
 *
 * @param[in] top_data 16 pixels of the row above the output row.
 * @param[in] mid_data 16 pixels of the output row itself.
 * @param[in] bot_data 16 pixels of the row below the output row.
 *
 * @return s16 vector with the 3x3 neighbourhood sum for 8 output pixels.
 */
int16x8_t calculate_kernel(const uint8x16_t &top_data, const uint8x16_t &mid_data, const uint8x16_t &bot_data)
{
    // Widen each row to two s16 halves (low/high 8 pixels) via u8 -> u16 -> s16.
    const int16x8x2_t top_s16 =
    {
        {
            vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(top_data))),
            vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(top_data)))
        }
    };
    const int16x8x2_t mid_s16 =
    {
        {
            vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(mid_data))),
            vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(mid_data)))
        }
    };
    const int16x8x2_t bot_s16 =
    {
        {
            vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(bot_data))),
            vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(bot_data)))
        }
    };

    // Accumulate the nine taps: for each row, add the vector itself plus the
    // same row shifted left by one and two pixels (vextq_s16 pulls the extra
    // pixels from the high half so output pixel i sums columns i, i+1, i+2).
    int16x8_t out = top_s16.val[0];
    out = vaddq_s16(out, vextq_s16(top_s16.val[0], top_s16.val[1], 1));
    out = vaddq_s16(out, vextq_s16(top_s16.val[0], top_s16.val[1], 2));
    out = vaddq_s16(out, mid_s16.val[0]);
    out = vaddq_s16(out, vextq_s16(mid_s16.val[0], mid_s16.val[1], 1));
    out = vaddq_s16(out, vextq_s16(mid_s16.val[0], mid_s16.val[1], 2));
    out = vaddq_s16(out, bot_s16.val[0]);
    out = vaddq_s16(out, vextq_s16(bot_s16.val[0], bot_s16.val[1], 1));
    out = vaddq_s16(out, vextq_s16(bot_s16.val[0], bot_s16.val[1], 2));
    return out;
}
84 #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC 94 unsigned char *
const input_top_ptr = _input->ptr_to_element(
Coordinates(-1, -1));
95 unsigned char *
const input_mid_ptr = _input->ptr_to_element(
Coordinates(-1, 0));
96 unsigned char *
const input_bot_ptr = _input->ptr_to_element(
Coordinates(-1, +1));
98 const float16x8_t oneovernine = vdupq_n_f16(1.0f / 9.0f);
102 const uint8x16_t top_data = vld1q_u8(input_top_ptr + input.
offset());
103 const uint8x16_t mid_data = vld1q_u8(input_mid_ptr + input.
offset());
104 const uint8x16_t bot_data = vld1q_u8(input_bot_ptr + input.
offset());
109 outfloat =
vmulq_f16(outfloat, oneovernine);
140 constexpr
unsigned int num_elems_read_per_iteration = 16;
141 constexpr
unsigned int num_elems_written_per_iteration = 8;
142 constexpr
unsigned int num_rows_read_per_iteration = 3;
143 constexpr
int rect_offset_xy = -1;
153 INEKernel::configure(win);
165 unsigned char *
const input_top_ptr = _input->ptr_to_element(
Coordinates(-1, -1));
166 unsigned char *
const input_mid_ptr = _input->ptr_to_element(
Coordinates(-1, 0));
167 unsigned char *
const input_bot_ptr = _input->ptr_to_element(
Coordinates(-1, +1));
169 const int shift = 19;
170 int value = (1 << shift) / 9 + 1;
171 const int32x4_t oneovernine = vdupq_n_s32(value);
175 const uint8x16_t top_data = vld1q_u8(input_top_ptr + input.
offset());
176 const uint8x16_t mid_data = vld1q_u8(input_mid_ptr + input.
offset());
177 const uint8x16_t bot_data = vld1q_u8(input_bot_ptr + input.
offset());
181 int32x4_t outfloathigh = vmovl_s16(vget_high_s16(out));
182 int32x4_t outfloatlow = vmovl_s16(vget_low_s16(out));
184 outfloathigh = vmulq_s32(outfloathigh, oneovernine);
185 outfloatlow = vmulq_s32(outfloatlow, oneovernine);
186 outfloathigh = vshrq_n_s32(outfloathigh, shift);
187 outfloatlow = vshrq_n_s32(outfloatlow, shift);
188 out = vcombine_s16(vqmovn_s32((outfloatlow)),
189 vqmovn_s32((outfloathigh)));
191 vst1_u8(output.
ptr(), vqmovun_s16(out));
bool set_format_if_unknown(ITensorInfo &info, Format format)
Set the format, data type and number of channels to the specified value if the current data type is unknown.
int16x8_t calculate_kernel(const uint8x16_t &top_data, const uint8x16_t &mid_data, const uint8x16_t &bot_data)
Window calculate_max_window(const ValidRegion &valid_region, const Steps &steps, bool skip_border, BorderSize border_size)
float16x8_t vcvtq_f16_s16(int16x8_t)
const Window & window() const
The maximum window the kernel can be executed on.
float16x8_t vmulq_f16(float16x8_t, float16x8_t)
Container for 2D border size.
1 channel, 1 U8 per channel
void run(const Window &window, const ThreadInfo &info) override
Execute the kernel on the passed window.
Interface for Neon tensor.
Copyright (c) 2017-2021 Arm Limited.
virtual ValidRegion valid_region() const =0
Valid region of the tensor.
void configure(const ITensor *input, ITensor *output, bool border_undefined)
Set the source, destination and border mode of the kernel.
Implementation of a rectangular access pattern.
bool update_window_and_padding(Window &win, Ts &&... patterns)
Update window and padding size for each of the access patterns.
#define ARM_COMPUTE_UNUSED(...)
To avoid unused variables warnings.
virtual const TensorShape & tensor_shape() const =0
Size for each dimension of the tensor.
#define ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(...)
Class to describe a number of elements in each dimension.
Implementation of a row access pattern.
#define ARM_COMPUTE_ERROR_ON_MISMATCHING_SHAPES(...)
virtual ITensorInfo * info() const =0
Interface to be implemented by the child class to return the tensor's metadata.
bool set_shape_if_empty(ITensorInfo &info, const TensorShape &shape)
Set the shape to the specified value if the current assignment is empty.
constexpr uint8_t * ptr() const
Return a pointer to the current pixel.
#define ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(k)
#define ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(t, c,...)
ScaleKernelInfo info(interpolation_policy, default_border_mode, PixelValue(), sampling_policy, false)
Information about executing thread and CPU.
unsigned int num_elems_processed_per_iteration
#define ARM_COMPUTE_ERROR_ON_NULLPTR(...)
void execute_window_loop(const Window &w, L &&lambda_function, Ts &&... iterators)
Iterate through the passed window, automatically adjusting the iterators and calling the lambda_function for each element.
int16x8_t vcvtq_s16_f16(float16x8_t)
constexpr size_t offset() const
Return the offset in bytes from the first element to the current position of the iterator.
Iterator updated by execute_window_loop for each window element.
BorderSize border_size() const override
The size of the border for that kernel.
Describe a multidimensional execution window.
#define ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(f, s)