52 #if defined(__aarch64__) 60 create_arm_pooling_requant<uint8_t, uint8_t>(
src,
dst,
info, cpu_info);
64 create_arm_pooling<uint8_t, uint8_t>(
src,
dst,
info, cpu_info);
70 create_arm_pooling_requant<int8_t, int8_t>(
src,
dst,
info, cpu_info);
74 create_arm_pooling<int8_t, int8_t>(
src,
dst,
info, cpu_info);
77 #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC 79 create_arm_pooling<float16_t, float16_t>(
src,
dst,
info, cpu_info);
83 create_arm_pooling<float, float>(
src,
dst,
info, cpu_info);
88 #endif // defined(__aarch64__) 91 INEKernel::configure(win);
105 "Only AVG and MAX pooling are supported by assembly kernels");
116 if(src_qinfo != dst_qinfo)
118 const float multiplier = src_qinfo.
scale / dst_qinfo.scale;
119 int32_t dst_multiplier{};
157 const auto in_ptr = src->
buffer() + src->info()->offset_first_element_in_bytes();
161 const auto src_shape = src->info()->tensor_shape();
163 const auto src_padding = src->info()->padding();
166 const size_t ld_src_col = src_shape[0] + src_padding.
left + src_padding.right;
167 const size_t ld_src_row = ld_src_col * (src_shape[1] + src_padding.top + src_padding.bottom);
168 const size_t ld_src_batch = ld_src_row * src_shape[2];
169 const size_t ld_dst_col =
dst_shape[0] + dst_padding.left + dst_padding.right;
170 const size_t ld_dst_row = ld_dst_col * (
dst_shape[1] + dst_padding.top + dst_padding.bottom);
171 const size_t ld_dst_batch = ld_dst_row *
dst_shape[2];
173 _kernel_asm->execute(in_ptr, ld_src_col, ld_src_row, ld_src_batch,
174 out_ptr, ld_dst_col, ld_dst_row, ld_dst_batch,
180 return _kernel_asm->get_working_size(num_threads);
185 return _kernel_asm !=
nullptr;
188 template <
typename Typesrc,
typename Typedst>
193 arm_conv::pooling::PoolingWindow window{};
194 window.cols =
static_cast<unsigned int>(info.
pool_size.
x());
195 window.rows =
static_cast<unsigned int>(info.
pool_size.
y());
197 arm_conv::pooling::PoolingStride stride{};
204 constexpr
unsigned int idx_channels = 0;
205 constexpr
unsigned int idx_batches = 3;
207 const unsigned int n_batches = src->
dimension(idx_batches);
208 const unsigned int src_rows = src->
dimension(idx_height);
209 const unsigned int src_cols = src->
dimension(idx_width);
210 const unsigned int n_channels = src->
dimension(idx_channels);
211 const unsigned int dst_rows = dst->
dimension(idx_height);
212 const unsigned int dst_cols = dst->
dimension(idx_width);
214 arm_conv::pooling::PoolingArgs
args(&cpu_info, pool_type, window, stride, info.
exclude_padding, n_batches, src_rows, src_cols, n_channels, dst_rows, dst_cols, padding,
nullptr);
217 auto pooling_kernel_asm = arm_conv::pooling::pooling<Typesrc, Typedst>(
args);
218 if(pooling_kernel_asm ==
nullptr)
224 _kernel_asm = std::move(pooling_kernel_asm);
227 template <
typename Typesrc,
typename Typedst>
232 arm_conv::pooling::PoolingWindow window{};
233 window.cols =
static_cast<unsigned int>(info.
pool_size.
x());
234 window.rows =
static_cast<unsigned int>(info.
pool_size.
y());
236 arm_conv::pooling::PoolingStride stride{};
243 constexpr
unsigned int idx_channels = 0;
244 constexpr
unsigned int idx_batches = 3;
246 const unsigned int n_batches = src->
dimension(idx_batches);
247 const unsigned int src_rows = src->
dimension(idx_height);
248 const unsigned int src_cols = src->
dimension(idx_width);
249 const unsigned int n_channels = src->
dimension(idx_channels);
250 const unsigned int dst_rows = dst->
dimension(idx_height);
251 const unsigned int dst_cols = dst->
dimension(idx_width);
253 arm_conv::pooling::PoolingArgs
args(&cpu_info, pool_type, window, stride, info.
exclude_padding, n_batches, src_rows, src_cols, n_channels, dst_rows, dst_cols, padding,
nullptr);
258 const float multiplier = src_qinfo.
scale / dst_qinfo.scale;
259 int32_t dst_multiplier{};
263 const arm_conv::pooling::Requantize32 requant_args(src_qinfo.offset,
270 auto pooling_kernel_asm = arm_conv::pooling::pooling<Typesrc, Typedst, arm_conv::pooling::Requantize32>(
args, requant_args);
271 if(pooling_kernel_asm ==
nullptr)
277 _kernel_asm = std::move(pooling_kernel_asm);
280 size_t CpuPool2dAssemblyWrapperKernel::get_mws(
const CPUInfo &platform,
size_t thread_count)
const
Window calculate_max_window(const ValidRegion &valid_region, const Steps &steps, bool skip_border, BorderSize border_size)
#define ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(tensor)
virtual size_t dimension(size_t index) const =0
Return the size of the requested dimension.
bool empty() const
Checks if pack is empty.
virtual DataType data_type() const =0
Data type used for each element of the tensor.
1 channel, 1 F32 per channel
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
Store the tensor's metadata.
size_t x() const
Semantic accessor for width as x.
unsigned int pad_top() const
Get the top padding.
Status calculate_quantized_multiplier(float multiplier, int32_t *quant_multiplier, int32_t *shift, bool ignore_epsilon=false)
Calculate quantized representation of multiplier.
#define ARM_COMPUTE_RETURN_ERROR_ON(cond)
If the condition is true, an error is returned.
Interface for CPU tensor.
SimpleTensor< float > src
Copyright (c) 2017-2022 Arm Limited.
1 channel, 1 F16 per channel
#define ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(...)
TensorShape compute_pool_shape(const ITensorInfo &input, PoolingLayerInfo pool_info)
Calculate the output pool shape of a tensor.
const ITensor * get_const_tensor(int id) const
Get constant tensor of a given id.
void run_op(ITensorPack &tensors, const Window &window, const ThreadInfo &info) override
Execute the kernel on the passed window.
bool is_configured() const
Was the asm kernel successfully configured?
#define ARM_COMPUTE_UNUSED(...)
To avoid unused variables warnings.
virtual const TensorShape & tensor_shape() const =0
Size for each dimension of the tensor.
quantized, asymmetric fixed-point 8-bit number unsigned
Class to describe a number of elements in each dimension.
virtual uint8_t * buffer() const =0
Interface to be implemented by the child class to return a pointer to CPU memory. ...
std::pair< unsigned int, unsigned int > stride() const
Get the stride.
Pooling Layer Information struct.
UniformQuantizationInfo uniform() const
Return per layer quantization info.
bool auto_init_if_empty(ITensorInfo &info, const TensorShape &shape, int num_channels, DataType data_type, QuantizationInfo quantization_info=QuantizationInfo())
Auto initialize the tensor info (shape, number of channels and data type) if the current assignment i...
virtual std::unique_ptr< T > clone() const =0
Provide a clone of the current object of class T.
virtual ITensorInfo * info() const =0
Interface to be implemented by the child class to return the tensor's metadata.
unsigned int pad_right() const
Get the right padding.
virtual PaddingSize padding() const =0
Padding of tensor.
unsigned int left
left of the border
virtual QuantizationInfo quantization_info() const =0
Get the quantization settings (scale and offset) of the tensor.
#define ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(k)
size_t y() const
Semantic accessor for height as y.
virtual size_t offset_first_element_in_bytes() const =0
The offset from the beginning of the memory allocation to the first element of the tensor...
PoolingType
Available pooling types.
ScaleKernelInfo info(interpolation_policy, default_border_mode, PixelValue(), sampling_policy, false)
ITensor * get_tensor(int id)
Get tensor of a given id from the pack.
size_t get_working_size(unsigned int num_threads) const
Get size of the workspace needed by the assembly kernel.
#define ARM_COMPUTE_RETURN_ERROR_MSG(...)
An error is returned with the given description.
PadStrideInfo pad_stride_info
Information about executing thread and CPU.
virtual size_t total_size() const =0
Returns the total size of the tensor in bytes.
#define ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(...)
Num samples, height, width, channels.
#define ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(t, c,...)
#define ARM_COMPUTE_RETURN_ERROR_ON_MSG(cond, msg)
If the condition is true, an error is returned.
bool is_pool_region_entirely_outside_input(const PoolingLayerInfo &info)
Check if the pool region is entirely outside the input tensor.
#define ARM_COMPUTE_ERROR_ON_NULLPTR(...)
quantized, asymmetric fixed-point 8-bit number signed
static Status validate(const ITensorInfo *src, const ITensorInfo *dst, const PoolingLayerInfo &info)
Static function to check if given info will lead to a valid configuration.
unsigned int pad_bottom() const
Get the bottom padding.
void configure(const ITensorInfo *src, ITensorInfo *dst, const PoolingLayerInfo &info, const CPUInfo &cpu_info)
Initialise the kernel's src and dst.
unsigned int pad_left() const
Get the left padding.
Describe a multidimensional execution window.
static constexpr size_t default_mws
virtual DataLayout data_layout() const =0
Get the data layout of the tensor.
bool has_padding() const
Check whether this has any padding.