24.02.1
|
Go to the documentation of this file.
45 static const std::vector<CpuDepthwiseConv2dNativeKernel::DepthwiseConv2dNativeKernel> available_kernels = {
46 {
"neon_qu8_deptwiseconv2dnative",
47 [](
const DepthwiseConv2dNativeDataTypeISASelectorData &data) {
return (data.weights_dt ==
DataType::QASYMM8); },
49 {
"neon_qs8_deptwiseconv2dnative",
50 [](
const DepthwiseConv2dNativeDataTypeISASelectorData &data)
53 {
"neon_fp16_deptwiseconv2dnative",
54 [](
const DepthwiseConv2dNativeDataTypeISASelectorData &data)
55 {
return (data.weights_dt ==
DataType::F16 && data.isa.fp16); },
57 {
"neon_fp32_deptwiseconv2dnative",
58 [](
const DepthwiseConv2dNativeDataTypeISASelectorData &data) {
return (data.weights_dt ==
DataType::F32); },
60 {
"neon_qp8_qu8_deptwiseconv2dnative",
61 [](
const DepthwiseConv2dNativeDataTypeISASelectorData &data)
64 {
"neon_qp8_qs8_deptwiseconv2dnative",
65 [](
const DepthwiseConv2dNativeDataTypeISASelectorData &data)
71 const ITensorInfo *weights,
72 const ITensorInfo *biases,
73 const ITensorInfo *
dst,
74 const ConvolutionInfo &
info)
83 src->dimension(1) +
info.pad_stride_info.pad_left() +
info.pad_stride_info.pad_right());
85 src->dimension(2) +
info.pad_stride_info.pad_top() +
info.pad_stride_info.pad_bottom());
89 (
info.pad_stride_info.stride().second < 1));
101 if (biases !=
nullptr)
116 if (
dst->total_size() != 0)
137 _has_biases = (biases !=
nullptr);
147 ->set_is_resizable(
true)
150 .set_quantization_info(
dst->quantization_info()));
153 ICpuKernel::configure(win);
177 _func(
src, weights, biases,
dst,
window, _has_biases, _conv_info);
182 return "CpuDepthwiseConv2dNativeKernel";
185 const std::vector<CpuDepthwiseConv2dNativeKernel::DepthwiseConv2dNativeKernel> &
188 return available_kernels;
Class to describe a number of elements in each dimension.
@ QSYMM8_PER_CHANNEL
quantized, symmetric per channel fixed-point 8-bit number
SimpleTensor< float > src
void configure(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, ITensorInfo *dst, const ConvolutionInfo &info)
Initialize the function's source, destination and parameters.
Window calculate_max_window(const ValidRegion &valid_region, const Steps &steps, bool skip_border, BorderSize border_size)
TensorShape compute_depthwise_convolution_shape(const ITensorInfo &input, const ITensorInfo &weights, const ConvolutionInfo &info)
Calculate the depthwise convolution output shape of a tensor.
@ QASYMM8
quantized, asymmetric fixed-point 8-bit number unsigned
Status validate_arguments(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *dst, const PadStrideInfo &conv_info)
#define ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(k)
static const std::vector< DepthwiseConv2dNativeKernel > & get_available_kernels()
#define REGISTER_QASYMM8_SIGNED_NEON(func_name)
static CPUInfo & get()
Access the KernelLibrary singleton.
ITensor * get_tensor(int id)
Get tensor of a given id from the pack.
#define REGISTER_FP16_NEON(func_name)
#define REGISTER_QASYMM8_NEON(func_name)
#define ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(...)
#define ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(t, c,...)
void neon_qp8_qu8_deptwiseconv2dnative(const ITensor *src, const ITensor *weights, const ITensor *bias, ITensor *dst, const Window &window, bool has_biases, const ConvolutionInfo &info)
#define ARM_COMPUTE_RETURN_ON_ERROR(status)
Checks if a status contains an error and returns it.
#define REGISTER_FP32_NEON(func_name)
#define ARM_COMPUTE_ERROR_ON_NULLPTR(...)
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
void run_op(ITensorPack &tensors, const Window &window, const ThreadInfo &info) override
Execute the kernel on the passed window.
const ITensor * get_const_tensor(int id) const
Get constant tensor of a given id.
#define ARM_COMPUTE_ERROR_THROW_ON(status)
void neon_qs8_deptwiseconv2dnative(const ITensor *src, const ITensor *weights, const ITensor *bias, ITensor *dst, const Window &window, bool has_biases, const ConvolutionInfo &info)
Traits defined on Arm® Neon™ vectors.
#define ARM_COMPUTE_RETURN_ERROR_ON(cond)
If the condition is true, an error is returned.
#define ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(tensor)
bool auto_init_if_empty(ITensorInfo &info, const TensorShape &shape, int num_channels, DataType data_type, QuantizationInfo quantization_info=QuantizationInfo())
Auto initialize the tensor info (shape, number of channels and data type) if the current assignment is empty.
@ QASYMM8_SIGNED
quantized, asymmetric fixed-point 8-bit number signed
#define ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(f, s)
virtual DataType data_type() const =0
Data type used for each element of the tensor.
bool is_data_type_quantized_per_channel(DataType dt)
Check if a given data type is of per channel type.
void neon_qu8_deptwiseconv2dnative(const ITensor *src, const ITensor *weights, const ITensor *bias, ITensor *dst, const Window &window, bool has_biases, const ConvolutionInfo &info)
#define ARM_COMPUTE_UNUSED(...)
To avoid unused variables warnings.
static const auto * get_implementation(const SelectorType &selector, KernelSelectionType selection_type=KernelSelectionType::Supported)
Micro-kernel selector.
cpuinfo::CpuIsaInfo get_isa() const
Gets the current cpu's ISA information.
const Window & window() const
The maximum window the kernel can be executed on.
static Status validate(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *dst, const ConvolutionInfo &info)
Static function to check if given info will lead to a valid configuration.
Information about executing thread and CPU.
const char * name() const override
Name of the kernel.
void neon_fp16_deptwiseconv2dnative(const ITensor *src, const ITensor *weights, const ITensor *bias, ITensor *dst, const Window &window, bool has_biases, const ConvolutionInfo &info)
Describe a multidimensional execution window.
Copyright (c) 2017-2024 Arm Limited.
#define ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(...)
@ F16
16-bit floating-point number
@ S32
signed 32-bit number
bool is_data_type_quantized_asymmetric(DataType dt)
Check if a given data type is of asymmetric quantized type.
#define ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(...)
@ UNKNOWN
Unknown data layout.
Store the tensor's metadata.
@ F32
32-bit floating-point number
ScaleKernelInfo info(interpolation_policy, default_border_mode, PixelValue(), sampling_policy, false)
void neon_qp8_qs8_deptwiseconv2dnative(const ITensor *src, const ITensor *weights, const ITensor *bias, ITensor *dst, const Window &window, bool has_biases, const ConvolutionInfo &info)
void neon_fp32_deptwiseconv2dnative(const ITensor *src, const ITensor *weights, const ITensor *bias, ITensor *dst, const Window &window, bool has_biases, const ConvolutionInfo &info)