// Registry of micro-kernels for the 1D logits max reduction, in priority order
// (SVE implementations first, plain NEON fallbacks last).
static const std::vector<CpuLogits1DMaxKernel::SoftmaxLogits1DMaxKernel> available_kernels_max_logits =
{
    { "sve_fp32_logits_1d_max", [](const DataTypeISASelectorData &data) { return (data.dt == DataType::F32) && data.isa.sve; }, REGISTER_FP32_SVE(sve_fp32_logits) },
    { "sve_fp16_logits_1d_max", [](const DataTypeISASelectorData &data) { return (data.dt == DataType::F16) && data.isa.sve && data.isa.fp16; }, REGISTER_FP16_SVE(sve_fp16_logits) },
    { "sve_qu8_logits_1d_max", [](const DataTypeISASelectorData &data) { return (data.dt == DataType::QASYMM8) && data.isa.sve; }, REGISTER_QASYMM8_SVE(sve_qasymm8_logits) },
    { "sve_qs8_logits_1d_max", [](const DataTypeISASelectorData &data) { return (data.dt == DataType::QASYMM8_SIGNED) && data.isa.sve; }, REGISTER_QASYMM8_SIGNED_SVE(sve_qasymm8_signed_logits) },
    { "neon_fp32_logits_1d_max", [](const DataTypeISASelectorData &data) { return (data.dt == DataType::F32); }, REGISTER_FP32_NEON(neon_fp32_logits) },
    { "neon_fp16_logits_1d_max", [](const DataTypeISASelectorData &data) { return (data.dt == DataType::F16) && data.isa.fp16; }, REGISTER_FP16_NEON(neon_fp16_logits) },
    { "neon_qu8_logits_1d_max", [](const DataTypeISASelectorData &data) { return (data.dt == DataType::QASYMM8); }, REGISTER_QASYMM8_NEON(neon_qasymm8_logits) },
    { "neon_qs8_logits_1d_max", [](const DataTypeISASelectorData &data) { return (data.dt == DataType::QASYMM8_SIGNED); }, REGISTER_QASYMM8_SIGNED_NEON(neon_qasymm8_singed_logits) }, // "singed" is the ukernel's declared spelling
};
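// Usage sketch (illustrative, not part of this file): get_implementation()
// returns the first table entry whose predicate accepts the query, so on an
// SVE-capable core the SVE entries above shadow their NEON fallbacks.
// DataTypeISASelectorData and CPUInfo are the ACL types used throughout this
// file; the snippet itself is hypothetical.
//
//     const auto *uk = CpuLogits1DMaxKernel::get_implementation(
//         DataTypeISASelectorData{ DataType::F32, CPUInfo::get().get_isa() });
//     // uk->name is "sve_fp32_logits_1d_max" with SVE, "neon_fp32_logits_1d_max" otherwise.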
Status validate_arguments_logits_1d_max(const ITensorInfo &input, const ITensorInfo &output)
{
    ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(&input);
    // These checks only run once the output has actually been configured
    if(output.total_size() != 0)
    {
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(&input, &output);
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_QUANTIZATION_INFO(&input, &output);
    }
    return Status{};
}

const std::vector<CpuLogits1DMaxKernel::SoftmaxLogits1DMaxKernel> &CpuLogits1DMaxKernel::get_available_kernels()
{
    return available_kernels_max_logits;
}
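// Shape sketch (assumed from the reduction semantics): the max is taken along
// dimension 0, so for a src of shape (W, H, ...) a pre-configured dst is
// expected to have shape (1, H, ...) with src's data type and quantization
// info; anything else makes validate_arguments_logits_1d_max() return an
// error Status. The tensor infos below are illustrative.
//
//     TensorInfo src_info(TensorShape(128U, 16U), 1, DataType::F32);
//     TensorInfo dst_info(TensorShape(1U, 16U), 1, DataType::F32);
//     const Status s = validate_arguments_logits_1d_max(src_info, dst_info); // OK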
void CpuLogits1DMaxKernel::configure(const ITensorInfo *src, ITensorInfo *dst)
{
    // ... argument validation and dst auto-initialisation elided ...
    const auto *uk = CpuLogits1DMaxKernel::get_implementation(DataTypeISASelectorData{ src->data_type(), CPUInfo::get().get_isa() });
    ARM_COMPUTE_ERROR_ON_NULLPTR(uk);

    _run_method = uk->ukernel;
    _name       = std::string("CpuLogits1DMaxKernel").append("/").append(uk->name);

    Window win = calculate_max_window(*src, Steps());
    ICpuKernel::configure(win);
}
void CpuLogits1DMaxKernel::run_op(ITensorPack &tensors, const Window &window, const ThreadInfo &info)
{
    ARM_COMPUTE_UNUSED(info);
    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICpuKernel::window(), window);

    const ITensor *src = tensors.get_const_tensor(TensorType::ACL_SRC);
    ITensor       *dst = tensors.get_tensor(TensorType::ACL_DST);

    _run_method(src, dst, window);
}

const char *CpuLogits1DMaxKernel::name() const
{
    return _name.c_str();
}
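// Dispatch sketch (hypothetical caller): the operator packs tensors under the
// standard ACL_SRC/ACL_DST ids and lets the scheduler split the window across
// threads; each run_op() invocation then forwards its sub-window to the
// selected micro-kernel.
//
//     ITensorPack pack;
//     pack.add_const_tensor(TensorType::ACL_SRC, &logits);
//     pack.add_tensor(TensorType::ACL_DST, &row_max);
//     NEScheduler::get().schedule_op(&kernel, Window::DimY, kernel.window(), pack);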
// Registry of softmax/log-softmax micro-kernels, again in priority order
// (SVE2, then SVE, then NEON fallbacks).
template <bool IS_LOG>
static const std::vector<typename CpuLogits1DSoftmaxKernel<IS_LOG>::SoftmaxLogits1DKernel> available_kernels_logits =
{
    { "sve2_qu8_softmax_logits_1d", [](const DataTypeISASelectorData &data) { return (data.dt == DataType::QASYMM8) && data.isa.sve2; }, REGISTER_QASYMM8_SVE2(sve2_qasymm8_softmax) },
    { "sve2_qs8_softmax_logits_1d", [](const DataTypeISASelectorData &data) { return (data.dt == DataType::QASYMM8_SIGNED) && data.isa.sve2; }, REGISTER_QASYMM8_SIGNED_SVE2(sve2_qasymm8_signed_softmax) },
    { "sve_fp32_softmax_logits_1d", [](const DataTypeISASelectorData &data) { return (data.dt == DataType::F32) && data.isa.sve; }, REGISTER_FP32_SVE(sve_fp32_softmax) },
    { "sve_fp16_softmax_logits_1d", [](const DataTypeISASelectorData &data) { return (data.dt == DataType::F16) && data.isa.sve && data.isa.fp16; }, REGISTER_FP16_SVE(sve_fp16_softmax) },
    { "neon_fp32_softmax_logits_1d", [](const DataTypeISASelectorData &data) { return (data.dt == DataType::F32); }, REGISTER_FP32_NEON(neon_fp32_softmax) },
    { "neon_fp16_softmax_logits_1d", [](const DataTypeISASelectorData &data) { return (data.dt == DataType::F16) && data.isa.fp16; }, REGISTER_FP16_NEON(neon_fp16_softmax) },
    { "neon_qu8_softmax_logits_1d", [](const DataTypeISASelectorData &data) { return (data.dt == DataType::QASYMM8); }, REGISTER_QASYMM8_NEON(neon_qasymm8_softmax) },
    { "neon_qs8_softmax_logits_1d", [](const DataTypeISASelectorData &data) { return (data.dt == DataType::QASYMM8_SIGNED); }, REGISTER_QASYMM8_SIGNED_NEON(neon_qasymm8_signed_softmax) },
};
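// What the selected micro-kernel computes per 1D row x, given the beta scale
// and the pre-computed row max m (subtracted for numerical stability):
//
//   softmax:     y_i = exp(beta * (x_i - m)) / sum_j exp(beta * (x_j - m))
//   log-softmax: y_i = beta * (x_i - m) - log(sum_j exp(beta * (x_j - m)))
//
// IS_LOG selects between the two at compile time; the same flag is also passed
// to the ukernel at run time (see run_op() below).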
template <bool IS_LOG>
const std::vector<typename CpuLogits1DSoftmaxKernel<IS_LOG>::SoftmaxLogits1DKernel> &CpuLogits1DSoftmaxKernel<IS_LOG>::get_available_kernels()
{
    return available_kernels_logits<IS_LOG>;
}
template <bool IS_LOG>
void CpuLogits1DSoftmaxKernel<IS_LOG>::configure(const ITensorInfo *src, const ITensorInfo *max, ITensorInfo *dst, const float beta, ITensorInfo *tmp)
{
    // ... validation, output/tmp auto-initialisation and window setup elided ...
    const auto *uk = CpuLogits1DSoftmaxKernel<IS_LOG>::get_implementation(DataTypeISASelectorData{ src->data_type(), CPUInfo::get().get_isa() });
    ARM_COMPUTE_ERROR_ON_NULLPTR(uk);

    std::string kernel_name = IS_LOG ? std::string("CpuLogits1DLogSoftmaxKernel") : std::string("CpuLogits1DSoftmaxKernel");

    _beta       = beta;
    _run_method = uk->ukernel;
    _name       = kernel_name.append("/").append(uk->name);
}
template <bool IS_LOG>
Status CpuLogits1DSoftmaxKernel<IS_LOG>::validate(const ITensorInfo *src, const ITensorInfo *max, const ITensorInfo *dst, const float beta, const ITensorInfo *tmp)
{
    // ... shared argument checks elided ...
    return Status{};
}
template <bool IS_LOG>
void CpuLogits1DSoftmaxKernel<IS_LOG>::run_op(ITensorPack &tensors, const Window &window, const ThreadInfo &info)
{
    // ... kernel/window validation and tensor unpacking elided ...
    const unsigned int num_elems_processed_per_iteration = src->info()->valid_region().shape.x();
    const unsigned int tmp_size_for_thread               = tmp->info()->element_size() * num_elems_processed_per_iteration;

    // Each scheduler thread works on its own disjoint slice of the tmp workspace.
    void *tmp_for_thread = tmp->buffer() + (info.thread_id * tmp_size_for_thread);

    _run_method(src, max, tmp_for_thread, dst, _beta, IS_LOG, window);
}
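// Workspace layout sketch (an assumption based on the indexing above): tmp is
// sized as one row-sized scratch buffer per scheduler thread, so writes from
// different threads never alias:
//
//   thread 0: [tmp->buffer(),                           tmp->buffer() +     tmp_size_for_thread)
//   thread 1: [tmp->buffer() +     tmp_size_for_thread, tmp->buffer() + 2 * tmp_size_for_thread)
//   ...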
template <bool IS_LOG>
const char *CpuLogits1DSoftmaxKernel<IS_LOG>::name() const
{
    return _name.c_str();
}

// Explicit instantiations for the softmax and log-softmax variants.
template class CpuLogits1DSoftmaxKernel<true>;
template class CpuLogits1DSoftmaxKernel<false>;
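// End-to-end sketch (hypothetical; the tensor infos are illustrative):
// validate, then configure, the non-log softmax kernel with beta = 1.
//
//     CpuLogits1DSoftmaxKernel<false> softmax;
//     ARM_COMPUTE_ERROR_THROW_ON(CpuLogits1DSoftmaxKernel<false>::validate(
//         &src_info, &max_info, &dst_info, 1.f, &tmp_info));
//     softmax.configure(&src_info, &max_info, &dst_info, 1.f, &tmp_info);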