#ifndef ARM_COMPUTE_CPU_DEPTHWISE_CONV2D_NATIVE_KERNEL_H
#define ARM_COMPUTE_CPU_DEPTHWISE_CONV2D_NATIVE_KERNEL_H

#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC

using DepthwiseConv2dNativeKernelPtr =

const char *name() const override;

DepthwiseConv2dNativeKernelPtr _func{ nullptr };
bool                           _has_biases{ false };
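The private members above suggest a configure-time dispatch pattern: the kernel selects a concrete micro-kernel once, stores it in _func, remembers whether biases are present in _has_biases, and then simply invokes the stored pointer on every run. Below is a minimal, self-contained sketch of that idiom; every type and name in it (FakeTensor, KernelFn, FakeKernel, run_fp32) is an illustrative stand-in, not ACL code.

#include <cstdio>

struct FakeTensor { const float *data; };   // stand-in for ITensor
using KernelFn = void (*)(const FakeTensor *src, FakeTensor *dst, bool has_biases);

void run_fp32(const FakeTensor *, FakeTensor *, bool has_biases)
{
    std::printf("fp32 path, biases=%d\n", static_cast<int>(has_biases));
}

class FakeKernel
{
public:
    void configure(bool has_biases)
    {
        // A real kernel would pick the pointer from a table of candidate micro-kernels.
        _func       = &run_fp32;
        _has_biases = has_biases;
    }
    void run(const FakeTensor *src, FakeTensor *dst) const
    {
        (*_func)(src, dst, _has_biases);
    }

private:
    KernelFn _func{ nullptr };
    bool     _has_biases{ false };
};

int main()
{
    FakeTensor src{ nullptr }, dst{ nullptr };
    FakeKernel k;
    k.configure(true);
    k.run(&src, &dst);
    return 0;
}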
const Window & window() const
The maximum window the kernel can be executed on.
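window() returns that maximum window; a caller (normally the ACL CPU scheduler) splits it into slices and hands each slice to run_op() on its own thread. The sketch below assumes a configured kernel, an already filled ITensorPack, ThreadInfo fields named thread_id and num_threads as in CPPTypes.h, and the internal include path shown; it is an illustration of the split, not the scheduler's actual code.

#include <cstddef>

#include "arm_compute/core/CPP/CPPTypes.h"
#include "arm_compute/core/ITensorPack.h"
#include "arm_compute/core/Window.h"
#include "src/cpu/kernels/CpuDepthwiseConv2dNativeKernel.h"

void run_in_slices(arm_compute::cpu::kernels::CpuDepthwiseConv2dNativeKernel &k,
                   arm_compute::ITensorPack &tensors, std::size_t num_slices)
{
    const arm_compute::Window max_win = k.window(); // maximum window the kernel can be executed on
    for(std::size_t id = 0; id < num_slices; ++id)
    {
        // Split the maximum window along Y; each slice could be run by a separate thread.
        const arm_compute::Window slice = max_win.split_window(arm_compute::Window::DimY, id, num_slices);
        arm_compute::ThreadInfo info{};
        info.thread_id   = static_cast<int>(id);
        info.num_threads = static_cast<int>(num_slices);
        k.run_op(tensors, slice, info);
    }
}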
ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(CpuDepthwiseConv2dNativeKernel)
std::add_pointer< bool(const DepthwiseConv2dNativeDataTypeISASelectorData &data)>::type DepthwiseConv2dNativeDataTypeISASelectorPtr
CpuDepthwiseConv2dNativeKernel()=default
void configure(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, ITensorInfo *dst, const ConvolutionInfo &info)
Initialize the function's source, destination and parameters.
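A minimal configuration sketch follows. It assumes ACL is available and that the internal header path matches the repository tree; the NHWC shapes and the ConvolutionInfo field names used here (pad_stride_info, depth_multiplier) are illustrative assumptions, not taken from this page.

#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Types.h"
#include "src/cpu/kernels/CpuDepthwiseConv2dNativeKernel.h"

using namespace arm_compute;

void configure_example()
{
    // NHWC tensors: TensorShape is written channel-first as (C, W, H, N).
    TensorInfo src(TensorShape(16U, 32U, 32U, 1U), 1, DataType::F32);
    TensorInfo weights(TensorShape(16U, 3U, 3U), 1, DataType::F32);
    TensorInfo biases(TensorShape(16U), 1, DataType::F32);
    TensorInfo dst{}; // destination shape is inferred and set by configure()
    src.set_data_layout(DataLayout::NHWC);
    weights.set_data_layout(DataLayout::NHWC);
    dst.set_data_layout(DataLayout::NHWC);

    ConvolutionInfo conv_info{};
    conv_info.pad_stride_info  = PadStrideInfo(1, 1, 1, 1); // stride 1, pad 1 (same padding for 3x3)
    conv_info.depth_multiplier = 1;

    cpu::kernels::CpuDepthwiseConv2dNativeKernel k;
    k.configure(&src, &weights, &biases, &dst, conv_info);
}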
void run_op(ITensorPack &tensors, const Window &window, const ThreadInfo &info) override
Execute the kernel on the passed window.
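Once configured, running the kernel amounts to packing the actual tensors and calling run_op() over the maximum window. The slot assignment below (ACL_SRC_0 for the input, ACL_SRC_1 for the weights, ACL_SRC_2 for the biases, ACL_DST for the output) is an assumption about how this kernel reads its pack, not something confirmed by this page.

#include "arm_compute/core/CPP/CPPTypes.h"
#include "arm_compute/core/ITensorPack.h"
#include "arm_compute/runtime/Tensor.h"
#include "src/cpu/kernels/CpuDepthwiseConv2dNativeKernel.h"

using namespace arm_compute;

void run_example(cpu::kernels::CpuDepthwiseConv2dNativeKernel &k,
                 Tensor &src, Tensor &weights, Tensor &biases, Tensor &dst)
{
    // All tensors are assumed to be allocated and to match the TensorInfos passed to configure().
    ITensorPack pack{};
    pack.add_const_tensor(TensorType::ACL_SRC_0, &src);     // input
    pack.add_const_tensor(TensorType::ACL_SRC_1, &weights); // depthwise weights
    pack.add_const_tensor(TensorType::ACL_SRC_2, &biases);  // biases (optional)
    pack.add_tensor(TensorType::ACL_DST, &dst);             // output

    k.run_op(pack, k.window(), ThreadInfo{});
}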
Interface for the kernel to run a native depthwise convolution on a tensor.
static Status validate(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *dst, const ConvolutionInfo &info)
Static function to check if given info will lead to a valid configuration.
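Because validate() only inspects the metadata, it can be used to probe a candidate configuration before any tensor memory exists. A sketch, assuming Status exposes error_code() and error_description() as declared in arm_compute's Error.h:

#include <iostream>

#include "arm_compute/core/Error.h"
#include "src/cpu/kernels/CpuDepthwiseConv2dNativeKernel.h"

using namespace arm_compute;

bool is_config_supported(const ITensorInfo *src, const ITensorInfo *weights,
                         const ITensorInfo *biases, const ITensorInfo *dst,
                         const ConvolutionInfo &info)
{
    const Status st = cpu::kernels::CpuDepthwiseConv2dNativeKernel::validate(src, weights, biases, dst, info);
    if(st.error_code() != ErrorCode::OK)
    {
        std::cerr << "Unsupported depthwise configuration: " << st.error_description() << "\n";
        return false;
    }
    return true;
}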
const DepthwiseConv2dNativeDataTypeISASelectorPtr is_selected
static const std::vector< DepthwiseConv2dNativeKernel > & get_available_kernels()
DepthwiseConv2dNativeKernelPtr ukernel
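The ukernel / is_selected / get_available_kernels() trio describes a registry of candidate micro-kernels: each entry carries a name, a selection predicate of the DepthwiseConv2dNativeDataTypeISASelectorPtr type shown above, and the function to run. The sketch below is a self-contained illustration of that pattern; its types, field names, and kernel names are stand-ins, not the ACL table.

#include <string>
#include <type_traits>
#include <vector>

struct SelectorData { bool is_fp16; };                            // stand-in for the ISA selector data
using SelectorPtr = std::add_pointer<bool(const SelectorData &)>::type;
using UKernelPtr  = std::add_pointer<void()>::type;

struct KernelEntry
{
    std::string name;
    SelectorPtr is_selected;
    UKernelPtr  ukernel;
};

void neon_fp16_dwc() {}
void neon_fp32_dwc() {}

static const std::vector<KernelEntry> &get_available_kernels()
{
    static const std::vector<KernelEntry> kernels = {
        { "neon_fp16_dwc", [](const SelectorData &d) { return d.is_fp16; },  &neon_fp16_dwc },
        { "neon_fp32_dwc", [](const SelectorData &d) { return !d.is_fp16; }, &neon_fp32_dwc },
    };
    return kernels;
}

// Return the first entry whose predicate accepts the requested configuration, or nullptr.
const KernelEntry *select_kernel(const SelectorData &d)
{
    for(const auto &entry : get_available_kernels())
    {
        if(entry.is_selected(d))
        {
            return &entry;
        }
    }
    return nullptr;
}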
const char * name() const override
Name of the kernel.