24.02.1
|
Go to the documentation of this file.
37 #if defined(ENABLE_FP32_KERNELS)
40 static constexpr
size_t default_mws_N1_fp32_neon = 24385;
41 static constexpr
size_t default_mws_V1_fp32_neon = 40520;
53 using CpuSubKernelDataTypeISASelectorData = CpuAddKernelDataTypeISASelectorData;
56 static const std::vector<CpuSubKernel::SubKernel> available_kernels = {
57 {
"neon_fp32_sub", [](
const CpuSubKernelDataTypeISASelectorData &data) {
return (data.dt ==
DataType::F32); },
60 [](
const CpuSubKernelDataTypeISASelectorData &data) {
return (data.dt ==
DataType::F16) && data.isa.fp16; },
62 {
"neon_u8_sub", [](
const CpuSubKernelDataTypeISASelectorData &data) {
return (data.dt ==
DataType::U8); },
64 {
"neon_s16_sub", [](
const CpuSubKernelDataTypeISASelectorData &data) {
return (data.dt ==
DataType::S16); },
66 {
"neon_s32_sub", [](
const CpuSubKernelDataTypeISASelectorData &data) {
return (data.dt ==
DataType::S32); },
68 {
"neon_qu8_sub_fixedpoint",
69 [](
const CpuSubKernelDataTypeISASelectorData &data)
72 {
"neon_qs8_sub_fixedpoint",
73 [](
const CpuSubKernelDataTypeISASelectorData &data)
76 {
"neon_qu8_sub", [](
const CpuSubKernelDataTypeISASelectorData &data) {
return (data.dt ==
DataType::QASYMM8); },
81 {
"neon_qs16_sub", [](
const CpuSubKernelDataTypeISASelectorData &data) {
return (data.dt ==
DataType::QSYMM16); },
96 const auto uk = CpuSubKernel::get_implementation<CpuSubKernelDataTypeISASelectorData>(
97 CpuSubKernelDataTypeISASelectorData{src0.data_type(),
CPUInfo::get().get_isa(), can_use_fixedpoint});
105 "Convert policy cannot be WRAP if datatype is quantized");
108 if (
dst.total_size() > 0)
112 "Wrong shape for dst");
130 const auto uk = CpuSubKernel::get_implementation<CpuSubKernelDataTypeISASelectorData>(
131 CpuSubKernelDataTypeISASelectorData{src0->
data_type(),
CPUInfo::get().get_isa(), can_use_fixedpoint});
136 _run_method = uk->ukernel;
137 _name = std::string(
"CpuSubKernel").append(
"/").append(uk->name);
143 ICpuKernel::configure(win);
150 #if defined(ENABLE_FP32_KERNELS)
151 if (this->_run_method == &sub_same_neon<float>)
156 mws = default_mws_N1_fp32_neon;
160 mws = default_mws_V1_fp32_neon;
178 return std::max(
static_cast<size_t>(1), mws);
207 _run_method(src0, src1,
dst, _policy,
window);
212 return _name.c_str();
217 return available_kernels;
void sub_qasymm8_signed_neon(const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window)
virtual const TensorShape & tensor_shape() const =0
Size for each dimension of the tensor.
void sub_qasymm8_neon(const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window)
size_t get_mws(const CPUInfo &platform, size_t thread_count) const override
Return minimum workload size of the relevant kernel.
@ QASYMM8
quantized, asymmetric fixed-point 8-bit number unsigned
bool set_data_type_if_unknown(ITensorInfo &info, DataType data_type)
Set the data type and number of channels to the specified value if the current data type is unknown.
Status validate_arguments(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *dst, const PadStrideInfo &conv_info)
#define ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(k)
#define REGISTER_QASYMM8_SIGNED_NEON(func_name)
bool sub_q8_neon_fixedpoint_possible(const ITensorInfo *src0, const ITensorInfo *src1, const ITensorInfo *dst)
static CPUInfo & get()
Access the CPUInfo singleton.
constexpr size_t num_iterations(size_t dimension) const
Return the number of iterations needed to iterate through a given dimension.
Interface for CPU tensor.
ITensor * get_tensor(int id)
Get the tensor of a given id from the pack.
#define REGISTER_FP16_NEON(func_name)
static Status validate(const ITensorInfo *src0, const ITensorInfo *src1, const ITensorInfo *dst, ConvertPolicy policy)
Static function to check if given info will lead to a valid configuration.
#define REGISTER_QASYMM8_NEON(func_name)
#define ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(...)
#define ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(t, c,...)
@ QSYMM16
quantized, symmetric fixed-point 16-bit number
#define ARM_COMPUTE_RETURN_ON_ERROR(status)
Checks if a status contains an error and returns it.
#define REGISTER_FP32_NEON(func_name)
#define REGISTER_QSYMM16_NEON(func_name)
#define ARM_COMPUTE_ERROR_ON_NULLPTR(...)
const char * name() const override
Name of the kernel.
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
const ITensor * get_const_tensor(int id) const
Get constant tensor of a given id.
#define ARM_COMPUTE_ERROR_THROW_ON(status)
void sub_qsymm16_neon(const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window)
#define ARM_COMPUTE_RETURN_ERROR_ON(cond)
If the condition is true, an error is returned.
#define ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(tensor)
@ U8
unsigned 8-bit number
@ S16
signed 16-bit number
bool have_different_dimensions(const Dimensions< T > &dim1, const Dimensions< T > &dim2, unsigned int upper_dim)
@ QASYMM8_SIGNED
quantized, asymmetric fixed-point 8-bit number signed
void run_op(ITensorPack &tensors, const Window &window, const ThreadInfo &info) override
Execute the kernel on the passed window.
#define ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(f, s)
#define REGISTER_INTEGER_NEON(func_name)
virtual DataType data_type() const =0
Data type used for each element of the tensor.
void configure(const ITensorInfo *src0, const ITensorInfo *src1, ITensorInfo *dst, ConvertPolicy policy)
Initialise the kernel's src and dst.
#define ARM_COMPUTE_UNUSED(...)
To avoid unused variables warnings.
static const std::vector< SubKernel > & get_available_kernels()
const Window & window() const
The maximum window the kernel can be executed on.
Information about executing thread and CPU.
bool set_shape_if_empty(ITensorInfo &info, const TensorShape &shape)
Set the shape to the specified value if the current assignment is empty.
Describe a multidimensional execution window.
#define ARM_COMPUTE_RETURN_ERROR_ON_MSG(cond, msg)
If the condition is true, an error is returned.
std::pair< Window, size_t > calculate_squashed_or_max_window(const ITensorInfo &src0, const ITensorInfo &src1)
Copyright (c) 2017-2024 Arm Limited.
ConvertPolicy
Policy to handle integer overflow.
@ F16
16-bit floating-point number
@ S32
signed 32-bit number
#define ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(...)
size_t num_iterations_total() const
Return the total number of iterations needed to iterate through the entire window.
bool is_data_type_quantized(DataType dt)
Check if a given data type is of quantized type.
Store the tensor's metadata.
void sub_same_neon_fp16(const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window)
@ F32
32-bit floating-point number
static TensorShape broadcast_shape(const Shapes &...shapes)
If shapes are broadcast compatible, return the broadcasted shape.
std::add_pointer< bool(const CpuAddKernelDataTypeISASelectorData &data)>::type CpuAddKernelDataTypeISASelectorDataPtr
ScaleKernelInfo info(interpolation_policy, default_border_mode, PixelValue(), sampling_policy, false)
void sub_qasymm8_neon_fixedpoint(const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window)
void sub_qasymm8_signed_neon_fixedpoint(const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window)
static constexpr size_t default_mws
CPUModel get_cpu_model(unsigned int cpuid) const
Gets the cpu model for a given cpuid.