23.08
|
Go to the documentation of this file.
56 static std::set<ActivationLayerInfo::ActivationFunction> quantized_supported_activations =
58 ActivationLayerInfo::ActivationFunction::RELU,
59 ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU,
60 ActivationLayerInfo::ActivationFunction::BOUNDED_RELU,
61 ActivationLayerInfo::ActivationFunction::LOGISTIC,
62 ActivationLayerInfo::ActivationFunction::TANH,
63 ActivationLayerInfo::ActivationFunction::HARD_SWISH,
64 ActivationLayerInfo::ActivationFunction::LEAKY_RELU,
67 const QuantizationInfo &oq_info = (
dst !=
nullptr) ?
dst->quantization_info() :
src->quantization_info();
71 "For Quantized data type only hard swish, leaky relu, tanh, logistic, relu and lower/upper bounded relu are supported");
83 if((
dst !=
nullptr) && (
dst->total_size() != 0))
104 _run_in_place = (
dst ==
nullptr) || (
dst ==
src);
122 const bool perform_activation_in_float =
123 (f_act == ActivationLayerInfo::ActivationFunction::LOGISTIC)
124 || (f_act == ActivationLayerInfo::ActivationFunction::TANH)
125 || (f_act == ActivationLayerInfo::ActivationFunction::HARD_SWISH)
126 || (f_act == ActivationLayerInfo::ActivationFunction::LEAKY_RELU);
130 build_opts.
add_option_if(perform_activation_in_float,
"-DFLOAT_DOMAIN");
137 std::string
kernel_name = std::string(
"activation_layer");
144 if(!perform_activation_in_float)
188 kernel_name += perform_activation_in_float ? std::string(
"_quant_f32") : std::string(
"_quant");
195 if(iq_info != oq_info)
214 ICLKernel::configure_internal(win);
217 _config_id =
"activation_layer_";
247 unsigned int idx = 0;
Class to describe a number of elements in each dimension.
std::string to_string(T &&value)
Convert integer and float values to string.
SimpleTensor< float > src
const StringSet & options() const
Gets the current options list set.
Window calculate_max_window(const ValidRegion &valid_region, const Steps &steps, bool skip_border, BorderSize border_size)
int16_t quantize_qsymm16(float value, const UniformQuantizationInfo &qinfo, RoundingPolicy rounding_policy=RoundingPolicy::TO_NEAREST_UP)
Quantize a value given a 16-bit symmetric quantization scheme.
@ QASYMM8
quantized, asymmetric fixed-point 8-bit number unsigned
arm_compute::ActivationFunction ActivationFunction
std::string lower_string(const std::string &val)
Lower a given string.
Status validate_arguments(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *dst, const PadStrideInfo &conv_info)
#define ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(k)
#define ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(...)
uint8_t quantize_qasymm8(float value, const INFO_TYPE &qinfo, RoundingPolicy rounding_policy=RoundingPolicy::TO_NEAREST_UP)
Quantize a value given an unsigned 8-bit asymmetric quantization scheme.
Window collapse_if_possible(const Window &full_window, size_t first, size_t last, bool *has_collapsed=nullptr) const
Collapse the dimensions between first and last if possible.
bool is_data_type_quantized_symmetric(DataType dt)
Check if a given data type is of symmetric quantized type.
ITensor * get_tensor(int id)
Get tensor of a given id from the pack.
const std::string & string_from_data_type(DataType dt)
Convert a data type identity into a string.
#define ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(...)
#define ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(t, c,...)
@ QSYMM16
quantized, symmetric fixed-point 16-bit number
#define ARM_COMPUTE_RETURN_ON_ERROR(status)
Checks if a status contains an error and returns it.
Activation Layer Information class.
#define ARM_COMPUTE_ERROR_ON_NULLPTR(...)
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
const ITensor * get_const_tensor(int id) const
Get constant tensor of a given id.
#define ARM_COMPUTE_ERROR_THROW_ON(status)
void add_option(std::string option)
Adds option to the existing build option list.
void configure(const ClCompileContext &compile_context, ITensorInfo *src, ITensorInfo *dst, ActivationLayerInfo act_info)
Configure kernel for a given list of arguments.
int8_t quantize_qasymm8_signed(float value, const INFO_TYPE &qinfo, RoundingPolicy rounding_policy=RoundingPolicy::TO_NEAREST_UP)
Quantize a value given a signed 8-bit asymmetric quantization scheme.
#define ARM_COMPUTE_RETURN_ERROR_ON(cond)
If the condition is true, an error is returned.
bool auto_init_if_empty(ITensorInfo &info, const TensorShape &shape, int num_channels, DataType data_type, QuantizationInfo quantization_info=QuantizationInfo())
Auto initialize the tensor info (shape, number of channels and data type) if the current assignment is empty.
#define ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(tensor)
ActivationFunction
Available activation functions.
cl::Kernel create_kernel(const CLCompileContext &ctx, const std::string &kernel_name, const std::set< std::string > &build_opts=std::set< std::string >())
Creates an opencl kernel using a compile context.
@ QASYMM8_SIGNED
quantized, asymmetric fixed-point 8-bit number signed
void add_option_if(bool cond, std::string option)
Adds option if a given condition is true.
#define ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(f, s)
std::string float_to_string_with_full_precision(float val)
Create a string with the float in full precision.
bool slide_window_slice_3D(Window &slice) const
Slide the passed 3D window slice.
static Status validate(const ITensorInfo *src, const ITensorInfo *dst, const ActivationLayerInfo &act_info)
Static function to check if given info will lead to a valid configuration.
void run_op(ITensorPack &tensors, const Window &window, ::cl::CommandQueue &queue) override
Window first_slice_window_3D() const
First 3D slice of the window.
const Window & window() const
The maximum window the kernel can be executed on.
std::string get_cl_type_from_data_type(const DataType &dt)
Translates a tensor data type to the appropriate OpenCL type.
void add_3D_tensor_argument(unsigned int &idx, const ICLTensor *tensor, const Window &window)
Add the passed 3D tensor's parameters to the object's kernel's arguments starting from the index idx.
Describe a multidimensional execution window.
@ ELEMENTWISE
Elementwise CL kernel type.
#define ARM_COMPUTE_RETURN_ERROR_ON_MSG(cond, msg)
If the condition is true, an error is returned.
Copyright (c) 2017-2023 Arm Limited.
@ F16
16-bit floating-point number
unsigned int adjust_vec_size(unsigned int vec_size, size_t dim0)
Returns the adjusted vector size in case it is less than the input's first dimension,...
bool has_padding_changed(const std::unordered_map< const ITensorInfo *, PaddingSize > &padding_map)
Check if the previously stored padding info has changed after configuring a kernel.
static constexpr size_t DimZ
Alias for dimension 2 also known as Z dimension.
cl::NDRange lws_hint() const
Return the Local-Workgroup-Size hint.
bool is_data_type_quantized_asymmetric(DataType dt)
Check if a given data type is of asymmetric quantized type.
const std::string & string_from_activation_func(const ActivationFunction &act)
Translates a given activation function to a string.
bool is_data_type_quantized(DataType dt)
Check if a given data type is of quantized type.
Store the tensor's metadata.
@ F32
32-bit floating-point number
unsigned int num_elems_processed_per_iteration
DataType
Available data types.
std::unordered_map< const ITensorInfo *, PaddingSize > get_padding_info(std::initializer_list< const ITensorInfo * > infos)
Stores padding information before configuring a kernel.
SimpleTensor< T > slice(const SimpleTensor< T > &src, Coordinates starts, Coordinates ends)
void enqueue(cl::CommandQueue &queue, ICLKernel &kernel, const Window &window, const cl::NDRange &lws_hint=CLKernelLibrary::get().default_ndrange(), bool use_dummy_work_items=false)
Add the kernel to the command queue with the given window.