41 : _run_sobel_x(false), _run_sobel_y(false), _input(nullptr), _output_x(nullptr), _output_y(nullptr)
55 _run_sobel_x = output_x !=
nullptr;
56 _run_sobel_y = output_y !=
nullptr;
74 constexpr
unsigned int num_elems_read_per_iteration = 16;
75 constexpr
unsigned int num_elems_written_per_iteration = 8;
76 constexpr
unsigned int num_rows_read_per_iteration = 3;
90 INEKernel::configure(win);
109 output_y =
Iterator(_output_y, window);
114 output_x =
Iterator(_output_x, window);
117 static const int16x8_t two = vdupq_n_s16(2);
118 static const int16x8_t minustwo = vdupq_n_s16(-2);
120 if(_run_sobel_y && _run_sobel_x)
124 const uint8x16_t top_data = vld1q_u8(input_top_ptr + input.
offset());
125 const uint8x16_t mid_data = vld1q_u8(input_mid_ptr + input.
offset());
126 const uint8x16_t bot_data = vld1q_u8(input_bot_ptr + input.
offset());
128 const int16x8x2_t top_s16 =
131 vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(top_data))),
132 vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(top_data)))
135 const int16x8x2_t mid_s16 =
138 vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(mid_data))),
139 vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(mid_data)))
142 const int16x8x2_t bot_s16 =
145 vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(bot_data))),
146 vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(bot_data)))
152 int16x8_t out_y = vnegq_s16(top_s16.val[0]);
154 out_y = vmlaq_s16(out_y, vextq_s16(top_s16.val[0], top_s16.val[1], 1), minustwo);
156 out_y = vsubq_s16(out_y, vextq_s16(top_s16.val[0], top_s16.val[1], 2));
158 out_y = vaddq_s16(out_y, bot_s16.val[0]);
160 out_y = vmlaq_s16(out_y, vextq_s16(bot_s16.val[0], bot_s16.val[1], 1), two);
162 out_y = vaddq_s16(out_y, vextq_s16(bot_s16.val[0], bot_s16.val[1], 2));
164 vst1q_s16(reinterpret_cast<int16_t *>(output_y.
ptr()), out_y);
168 int16x8_t out_x = vnegq_s16(top_s16.val[0]);
170 out_x = vaddq_s16(out_x, vextq_s16(top_s16.val[0], top_s16.val[1], 2));
172 out_x = vmlaq_s16(out_x, mid_s16.val[0], minustwo);
174 out_x = vmlaq_s16(out_x, vextq_s16(mid_s16.val[0], mid_s16.val[1], 2), two);
176 out_x = vsubq_s16(out_x, bot_s16.val[0]);
178 out_x = vaddq_s16(out_x, vextq_s16(bot_s16.val[0], bot_s16.val[1], 2));
180 vst1q_s16(reinterpret_cast<int16_t *>(output_x.
ptr()), out_x);
182 input, output_x, output_y);
184 else if(_run_sobel_x)
188 const uint8x16_t top_data = vld1q_u8(input_top_ptr + input.
offset());
189 const uint8x16_t mid_data = vld1q_u8(input_mid_ptr + input.
offset());
190 const uint8x16_t bot_data = vld1q_u8(input_bot_ptr + input.
offset());
192 const int16x8x2_t top_s16 =
195 vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(top_data))),
196 vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(top_data)))
199 const int16x8x2_t mid_s16 =
202 vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(mid_data))),
203 vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(mid_data)))
206 const int16x8x2_t bot_s16 =
209 vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(bot_data))),
210 vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(bot_data)))
216 int16x8_t out = vnegq_s16(top_s16.val[0]);
218 out = vaddq_s16(out, vextq_s16(top_s16.val[0], top_s16.val[1], 2));
220 out = vmlaq_s16(out, mid_s16.val[0], minustwo);
222 out = vmlaq_s16(out, vextq_s16(mid_s16.val[0], mid_s16.val[1], 2), two);
224 out = vsubq_s16(out, bot_s16.val[0]);
226 out = vaddq_s16(out, vextq_s16(bot_s16.val[0], bot_s16.val[1], 2));
228 vst1q_s16(reinterpret_cast<int16_t *>(output_x.
ptr()), out);
232 else if(_run_sobel_y)
236 const uint8x16_t top_data = vld1q_u8(input_top_ptr + input.
offset());
237 const uint8x16_t bot_data = vld1q_u8(input_bot_ptr + input.
offset());
239 const int16x8x2_t top_s16 =
242 vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(top_data))),
243 vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(top_data)))
246 const int16x8x2_t bot_s16 =
249 vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(bot_data))),
250 vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(bot_data)))
256 int16x8_t out = vnegq_s16(top_s16.val[0]);
258 out = vmlaq_s16(out, vextq_s16(top_s16.val[0], top_s16.val[1], 1), minustwo);
260 out = vsubq_s16(out, vextq_s16(top_s16.val[0], top_s16.val[1], 2));
262 out = vaddq_s16(out, bot_s16.val[0]);
264 out = vmlaq_s16(out, vextq_s16(bot_s16.val[0], bot_s16.val[1], 1), two);
266 out = vaddq_s16(out, vextq_s16(bot_s16.val[0], bot_s16.val[1], 2));
268 vst1q_s16(reinterpret_cast<int16_t *>(output_y.
ptr()), out);
unsigned int top
top of the border
Window calculate_max_window(const ValidRegion &valid_region, const Steps &steps, bool skip_border, BorderSize border_size)
const Window & window() const
The maximum window the kernel can be executed on.
uint8_t * ptr_to_element(const Coordinates &id) const
Return a pointer to the element at the passed coordinates.
Container for 2D border size.
1 channel, 1 U8 per channel
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception is thrown.
NESobel3x3Kernel()
Default constructor.
Interface for Neon tensor.
void configure(const ITensor *input, ITensor *output_x, ITensor *output_y, bool border_undefined)
Initialise the kernel's source, destination and border mode.
Copyright (c) 2017-2021 Arm Limited.
virtual ValidRegion valid_region() const =0
Valid region of the tensor.
Implementation of a rectangular access pattern.
bool update_window_and_padding(Window &win, Ts &&... patterns)
Update window and padding size for each of the access patterns.
#define ARM_COMPUTE_UNUSED(...)
To avoid unused variables warnings.
Class to describe a number of elements in each dimension.
Implementation of a row access pattern.
virtual ITensorInfo * info() const =0
Interface to be implemented by the child class to return the tensor's metadata.
constexpr uint8_t * ptr() const
Return a pointer to the current pixel.
unsigned int left
left of the border
#define ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(k)
1 channel, 1 S16 per channel
#define ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(t, c,...)
ScaleKernelInfo info(interpolation_policy, default_border_mode, PixelValue(), sampling_policy, false)
Information about executing thread and CPU.
void run(const Window &window, const ThreadInfo &info) override
Execute the kernel on the passed window.
unsigned int num_elems_processed_per_iteration
void execute_window_loop(const Window &w, L &&lambda_function, Ts &&... iterators)
Iterate through the passed window, automatically adjusting the iterators and calling the lambda_function for each element.
constexpr size_t offset() const
Return the offset in bytes from the first element to the current position of the iterator.
Iterator updated by execute_window_loop for each window element.
Describe a multidimensional execution window.
BorderSize border_size() const override
The size of the border for that kernel.
#define ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(f, s)