59 output_read_access, output_write_access);
63 IKernel::configure(win);
90 const uint8x16_t input_pixels = vld1q_u8(input.
ptr());
92 const uint16x8x2_t tmp =
95 vmovl_u8(vget_low_u8(input_pixels)),
96 vmovl_u8(vget_high_u8(input_pixels))
100 uint32x4x4_t pixels =
103 vmovl_u16(vget_low_u16(tmp.val[0])),
104 vmovl_u16(vget_high_u16(tmp.val[0])),
105 vmovl_u16(vget_low_u16(tmp.val[1])),
106 vmovl_u16(vget_high_u16(tmp.val[1]))
111 const size_t off = output.
offset() / 4;
114 const uint32_t *
const top_mid_ptr = output_top_mid + off;
116 pixels.val[0] = vaddq_u32(vld1q_u32(top_mid_ptr), pixels.val[0]);
117 pixels.val[1] = vaddq_u32(vld1q_u32(top_mid_ptr + 4), pixels.val[1]);
118 pixels.val[2] = vaddq_u32(vld1q_u32(top_mid_ptr + 8), pixels.val[2]);
119 pixels.val[3] = vaddq_u32(vld1q_u32(top_mid_ptr + 12), pixels.val[3]);
122 const auto outptr =
reinterpret_cast<uint32_t *
>(output.
ptr());
123 const uint32_t *
const top_left_ptr = output_top_left + off;
125 pixels.val[0] = vsubq_u32(pixels.val[0], vld1q_u32(top_left_ptr));
126 vst1q_u32(outptr, pixels.val[0]);
128 pixels.val[1] = vsubq_u32(pixels.val[1], vld1q_u32(top_left_ptr + 4));
129 vst1q_u32(outptr + 4, pixels.val[1]);
131 pixels.val[2] = vsubq_u32(pixels.val[2], vld1q_u32(top_left_ptr + 8));
132 vst1q_u32(outptr + 8, pixels.val[2]);
134 pixels.val[3] = vsubq_u32(pixels.val[3], vld1q_u32(top_left_ptr + 12));
135 vst1q_u32(outptr + 12, pixels.val[3]);
138 for(
auto i = 0; i < 16; ++i)
140 outptr[i] += outptr[i - 1];
void run(const Window &window, const ThreadInfo &info) override
Execute the kernel on the passed window.
Window calculate_max_window(const ValidRegion &valid_region, const Steps &steps, bool skip_border, BorderSize border_size)
const Window & window() const
The maximum window the kernel can be executed on.
uint8_t * ptr_to_element(const Coordinates &id) const
Return a pointer to the element at the passed coordinates.
void configure(const ITensor *input, ITensor *output)
Set the source, destination and border mode of the kernel.
Container for 2D border size.
1 channel, 1 U8 per channel
BorderSize border_size() const override
The size of the border for that kernel.
Interface for Neon tensor.
Copyright (c) 2017-2021 Arm Limited.
virtual ValidRegion valid_region() const =0
Valid region of the tensor.
Implementation of a rectangular access pattern.
bool update_window_and_padding(Window &win, Ts &&... patterns)
Update window and padding size for each of the access patterns.
#define ARM_COMPUTE_UNUSED(...)
To avoid unused variables warnings.
1 channel, 1 U32 per channel
Class to describe a number of elements in each dimension.
Implementation of a row access pattern.
virtual ITensorInfo * info() const =0
Interface to be implemented by the child class to return the tensor's metadata.
constexpr uint8_t * ptr() const
Return a pointer to the current pixel.
#define ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(k)
#define ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(t, c,...)
ScaleKernelInfo info(interpolation_policy, default_border_mode, PixelValue(), sampling_policy, false)
Information about executing thread and CPU.
bool is_parallelisable() const override
Indicates whether or not the kernel is parallelisable.
unsigned int num_elems_processed_per_iteration
void execute_window_loop(const Window &w, L &&lambda_function, Ts &&... iterators)
Iterate through the passed window, automatically adjusting the iterators and calling the lambda_function for each element.
constexpr size_t offset() const
Return the offset in bytes from the first element to the current position of the iterator.
Iterator updated by execute_window_loop for each window element.
Describe a multidimensional execution window.
#define ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(f, s)