58 create_arm_pooling_requant<uint8_t, uint8_t>(
src,
dst,
info, cpu_info);
62 create_arm_pooling<uint8_t, uint8_t>(
src,
dst,
info, cpu_info);
68 create_arm_pooling_requant<int8_t, int8_t>(
src,
dst,
info, cpu_info);
72 create_arm_pooling<int8_t, int8_t>(
src,
dst,
info, cpu_info);
75 #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC 77 create_arm_pooling<float16_t, float16_t>(
src,
dst,
info, cpu_info);
81 create_arm_pooling<float, float>(
src,
dst,
info, cpu_info);
88 INEKernel::configure(win);
102 "Only AVG and MAX pooling are supported by assembly kernels");
111 if(src_qinfo != dst_qinfo)
113 const float multiplier = src_qinfo.
scale / dst_qinfo.scale;
114 int32_t dst_multiplier{};
152 const auto in_ptr = src->
buffer() + src->info()->offset_first_element_in_bytes();
156 const auto src_shape = src->info()->tensor_shape();
158 const auto src_padding = src->info()->padding();
161 const size_t ld_src_col = src_shape[0] + src_padding.
left + src_padding.right;
162 const size_t ld_src_row = ld_src_col * (src_shape[1] + src_padding.top + src_padding.bottom);
163 const size_t ld_src_batch = ld_src_row * src_shape[2];
164 const size_t ld_dst_col =
dst_shape[0] + dst_padding.left + dst_padding.right;
165 const size_t ld_dst_row = ld_dst_col * (
dst_shape[1] + dst_padding.top + dst_padding.bottom);
166 const size_t ld_dst_batch = ld_dst_row *
dst_shape[2];
168 _kernel_asm->execute(in_ptr, ld_src_col, ld_src_row, ld_src_batch,
169 out_ptr, ld_dst_col, ld_dst_row, ld_dst_batch,
175 return _kernel_asm->get_working_size(num_threads);
180 return _kernel_asm !=
nullptr;
183 template <
typename Typesrc,
typename Typedst>
188 arm_conv::pooling::PoolingWindow window{};
189 window.cols =
static_cast<unsigned int>(info.
pool_size.
x());
190 window.rows =
static_cast<unsigned int>(info.
pool_size.
y());
192 arm_conv::pooling::PoolingStride stride{};
199 constexpr
unsigned int idx_channels = 0;
200 constexpr
unsigned int idx_batches = 3;
202 const unsigned int n_batches = src->
dimension(idx_batches);
203 const unsigned int src_rows = src->
dimension(idx_height);
204 const unsigned int src_cols = src->
dimension(idx_width);
205 const unsigned int n_channels = src->
dimension(idx_channels);
206 const unsigned int dst_rows = dst->
dimension(idx_height);
207 const unsigned int dst_cols = dst->
dimension(idx_width);
209 arm_conv::pooling::PoolingArgs
args(&cpu_info, pool_type, window, stride, info.
exclude_padding, n_batches, src_rows, src_cols, n_channels, dst_rows, dst_cols, padding,
nullptr);
212 auto pooling_kernel_asm = arm_conv::pooling::pooling<Typesrc, Typedst>(
args);
213 if(pooling_kernel_asm ==
nullptr)
219 _kernel_asm = std::move(pooling_kernel_asm);
222 template <
typename Typesrc,
typename Typedst>
227 arm_conv::pooling::PoolingWindow window{};
228 window.cols =
static_cast<unsigned int>(info.
pool_size.
x());
229 window.rows =
static_cast<unsigned int>(info.
pool_size.
y());
231 arm_conv::pooling::PoolingStride stride{};
238 constexpr
unsigned int idx_channels = 0;
239 constexpr
unsigned int idx_batches = 3;
241 const unsigned int n_batches = src->
dimension(idx_batches);
242 const unsigned int src_rows = src->
dimension(idx_height);
243 const unsigned int src_cols = src->
dimension(idx_width);
244 const unsigned int n_channels = src->
dimension(idx_channels);
245 const unsigned int dst_rows = dst->
dimension(idx_height);
246 const unsigned int dst_cols = dst->
dimension(idx_width);
248 arm_conv::pooling::PoolingArgs
args(&cpu_info, pool_type, window, stride, info.
exclude_padding, n_batches, src_rows, src_cols, n_channels, dst_rows, dst_cols, padding,
nullptr);
253 const float multiplier = src_qinfo.
scale / dst_qinfo.scale;
254 int32_t dst_multiplier{};
258 const arm_conv::pooling::Requantize32 requant_args(src_qinfo.offset,
265 auto pooling_kernel_asm = arm_conv::pooling::pooling<Typesrc, Typedst, arm_conv::pooling::Requantize32>(
args, requant_args);
266 if(pooling_kernel_asm ==
nullptr)
272 _kernel_asm = std::move(pooling_kernel_asm);
Window calculate_max_window(const ValidRegion &valid_region, const Steps &steps, bool skip_border, BorderSize border_size)
#define ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(tensor)
virtual size_t dimension(size_t index) const =0
Return the size of the requested dimension.
bool empty() const
Checks if pack is empty.
virtual DataType data_type() const =0
Data type used for each element of the tensor.
1 channel, 1 F32 per channel
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
Store the tensor's metadata.
size_t x() const
Semantic accessor for width as x.
unsigned int pad_top() const
Get the top padding.
Status calculate_quantized_multiplier(float multiplier, int32_t *quant_multiplier, int32_t *shift, bool ignore_epsilon=false)
Calculate quantized representation of multiplier.
#define ARM_COMPUTE_RETURN_ERROR_ON(cond)
If the condition is true, an error is returned.
Interface for Neon tensor.
SimpleTensor< float > src
Copyright (c) 2017-2021 Arm Limited.
1 channel, 1 F16 per channel
static Status validate(const ITensorInfo *src, const ITensorInfo *dst, const PoolingLayerInfo &info)
Indicates whether or not this function can be used to process the given parameters.
#define ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(...)
TensorShape compute_pool_shape(const ITensorInfo &input, PoolingLayerInfo pool_info)
Calculate the output pool shape of a tensor.
bool is_configured() const
Was the asm kernel successfully configured?
const ITensor * get_const_tensor(int id) const
Get constant tensor of a given id.
#define ARM_COMPUTE_UNUSED(...)
To avoid unused variables warnings.
void run_op(ITensorPack &tensors, const Window &window, const ThreadInfo &info) override
Execute the kernel on the passed window.
virtual const TensorShape & tensor_shape() const =0
Size for each dimension of the tensor.
quantized, asymmetric fixed-point 8-bit number unsigned
Class to describe a number of elements in each dimension.
virtual uint8_t * buffer() const =0
Interface to be implemented by the child class to return a pointer to CPU memory.
std::pair< unsigned int, unsigned int > stride() const
Get the stride.
Pooling Layer Information struct.
UniformQuantizationInfo uniform() const
Return per layer quantization info.
bool auto_init_if_empty(ITensorInfo &info, const TensorShape &shape, int num_channels, DataType data_type, QuantizationInfo quantization_info=QuantizationInfo())
Auto initialize the tensor info (shape, number of channels and data type) if the current assignment is empty.
virtual std::unique_ptr< T > clone() const =0
Provide a clone of the current object of class T.
virtual ITensorInfo * info() const =0
Interface to be implemented by the child class to return the tensor's metadata.
unsigned int pad_right() const
Get the right padding.
size_t get_working_size(unsigned int num_threads) const
Get size of the workspace needed by the assembly kernel.
virtual PaddingSize padding() const =0
Padding of tensor.
unsigned int left
left of the border
virtual QuantizationInfo quantization_info() const =0
Get the quantization settings (scale and offset) of the tensor.
#define ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(k)
size_t y() const
Semantic accessor for height as y.
virtual size_t offset_first_element_in_bytes() const =0
The offset from the beginning of the memory allocation to the first element of the tensor.
PoolingType
Available pooling types.
ScaleKernelInfo info(interpolation_policy, default_border_mode, PixelValue(), sampling_policy, false)
ITensor * get_tensor(int id)
Get tensor of a given id from the pack.
#define ARM_COMPUTE_RETURN_ERROR_MSG(...)
An error is returned with the given description.
PadStrideInfo pad_stride_info
Information about executing thread and CPU.
virtual size_t total_size() const =0
Returns the total size of the tensor in bytes.
#define ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(...)
Num samples, height, width, channels.
#define ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(t, c,...)
#define ARM_COMPUTE_RETURN_ERROR_ON_MSG(cond, msg)
If the condition is true, an error is returned.
#define ARM_COMPUTE_ERROR_ON_NULLPTR(...)
quantized, asymmetric fixed-point 8-bit number signed
unsigned int pad_bottom() const
Get the bottom padding.
unsigned int pad_left() const
Get the left padding.
Describe a multidimensional execution window.
void configure(const ITensorInfo *src, ITensorInfo *dst, const PoolingLayerInfo &info, const CPUInfo &cpu_info)
Initialise the kernel's src and dst.
virtual DataLayout data_layout() const =0
Get the data layout of the tensor.
bool has_padding() const
Check whether this has any padding.