47 : _func(nullptr), _input(nullptr), _output_x(nullptr), _output_y(nullptr)
61 const bool run_der_x = output_x !=
nullptr;
62 const bool run_der_y = output_y !=
nullptr;
79 constexpr
unsigned int num_rows_read_per_iteration = 3;
93 if(run_der_x && run_der_y)
95 _func = &NEDerivativeKernel::derivative_xy;
104 _func = &NEDerivativeKernel::derivative_x;
110 _func = &NEDerivativeKernel::derivative_y;
120 INEKernel::configure(win);
123 void NEDerivativeKernel::derivative_x(
const Window &
window)
132 const uint8x16_t l_data = vld1q_u8(in.
ptr() - 1);
133 const uint8x16_t r_data = vld1q_u8(in.
ptr() + 1);
136 const int16x8_t out0 = vsubq_s16(vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(r_data))),
137 vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(l_data))));
140 const int16x8_t out1 = vsubq_s16(vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(r_data))),
141 vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(l_data))));
144 vst1q_s16(reinterpret_cast<int16_t *>(out_x.
ptr()), out0);
145 vst1q_s16(reinterpret_cast<int16_t *>(out_x.
ptr()) + 8, out1);
150 void NEDerivativeKernel::derivative_y(
const Window &window)
161 const uint8x16_t t_data = vld1q_u8(in.
ptr() - stride);
162 const uint8x16_t b_data = vld1q_u8(in.
ptr() + stride);
165 const int16x8_t out0 = vsubq_s16(vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(b_data))),
166 vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(t_data))));
169 const int16x8_t out1 = vsubq_s16(vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(b_data))),
170 vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(t_data))));
173 vst1q_s16(reinterpret_cast<int16_t *>(out_y.
ptr()), out0);
174 vst1q_s16(reinterpret_cast<int16_t *>(out_y.
ptr()) + 8, out1);
179 void NEDerivativeKernel::derivative_xy(
const Window &window)
191 const uint8x16_t t_data = vld1q_u8(in.
ptr() - stride);
192 const uint8x16_t b_data = vld1q_u8(in.
ptr() + stride);
193 const uint8x16_t l_data = vld1q_u8(in.
ptr() - 1);
194 const uint8x16_t r_data = vld1q_u8(in.
ptr() + 1);
197 const int16x8_t out0 = vsubq_s16(vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(b_data))),
198 vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(t_data))));
201 const int16x8_t out1 = vsubq_s16(vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(b_data))),
202 vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(t_data))));
205 const int16x8_t out2 = vsubq_s16(vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(r_data))),
206 vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(l_data))));
209 const int16x8_t out3 = vsubq_s16(vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(r_data))),
210 vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(l_data))));
213 vst1q_s16(reinterpret_cast<int16_t *>(out_y.
ptr()), out0);
214 vst1q_s16(reinterpret_cast<int16_t *>(out_y.
ptr()) + 8, out1);
217 vst1q_s16(reinterpret_cast<int16_t *>(out_x.
ptr()), out2);
218 vst1q_s16(reinterpret_cast<int16_t *>(out_x.
ptr()) + 8, out3);
230 (this->*_func)(window);
unsigned int top
top of the border
Window calculate_max_window(const ValidRegion &valid_region, const Steps &steps, bool skip_border, BorderSize border_size)
const Window & window() const
The maximum window the kernel can be executed on.
Container for 2D border size.
#define ARM_COMPUTE_ERROR(msg)
Print the given message then throw a std::runtime_error.
void run(const Window &window, const ThreadInfo &info) override
Execute the kernel on the passed window.
1 channel, 1 U8 per channel
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
Interface for Neon tensor.
Copyright (c) 2017-2021 Arm Limited.
virtual ValidRegion valid_region() const =0
Valid region of the tensor.
Implementation of a rectangular access pattern.
bool update_window_and_padding(Window &win, Ts &&... patterns)
Update window and padding size for each of the access patterns.
BorderSize border_size() const override
The size of the border for that kernel.
#define ARM_COMPUTE_UNUSED(...)
To avoid unused variables warnings.
Class to describe a number of elements in each dimension.
Implementation of a row access pattern.
virtual ITensorInfo * info() const =0
Interface to be implemented by the child class to return the tensor's metadata.
constexpr uint8_t * ptr() const
Return a pointer to the current pixel.
unsigned int left
left of the border
#define ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(k)
1 channel, 1 S16 per channel
#define ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(t, c,...)
void configure(const ITensor *input, ITensor *output_x, ITensor *output_y, bool border_undefined)
Initialise the kernel's sources, destination and border.
ScaleKernelInfo info(interpolation_policy, default_border_mode, PixelValue(), sampling_policy, false)
Information about executing thread and CPU.
NEDerivativeKernel()
Default constructor.
unsigned int num_elems_processed_per_iteration
void execute_window_loop(const Window &w, L &&lambda_function, Ts &&... iterators)
Iterate through the passed window, automatically adjusting the iterators and calling the lambda_function for each element.
virtual const Strides & strides_in_bytes() const =0
The strides in bytes for accessing each dimension of the tensor.
Iterator updated by execute_window_loop for each window element.
Describe a multidimensional execution window.
#define ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(f, s)