CLDepthwiseConvolutionLayerNativeKernel.cpp (Arm Compute Library 23.11)

Excerpts from the kernel source, followed by reference descriptions of the symbols it uses.
Status validate_arguments(const ITensorInfo          *input,
                          const ITensorInfo          *weights,
                          const ITensorInfo          *biases,
                          const ITensorInfo          *output,
                          const DWCComputeKernelInfo &dwc_info,
                          const ConvolutionInfo      &conv_info,
                          const ITensorInfo          *output_multipliers,
                          const ITensorInfo          *output_shifts)
    bool in_place = false;
    if (output == nullptr || output == input)
    {
        ...
    }

    ...

    ARM_COMPUTE_RETURN_ERROR_ON_MSG(dwc_info.export_weights_to_cl_image && !export_to_cl_image(weights),
                                    "Weights cannot be exported to cl_image!");
    const int weights_width_idx  = get_data_layout_dimension_index(weights->data_layout(), DataLayoutDimension::WIDTH);
    const int weights_height_idx = get_data_layout_dimension_index(weights->data_layout(), DataLayoutDimension::HEIGHT);

    ...
        weights->tensor_shape()[weights_height_idx] != 1U);
    if (conv_info.depth_multiplier > 1 && dwc_info.n0 > 1) { ... }

    if (biases != nullptr) { ... }

    if (output->total_size() != 0) { ... }
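Judging from the reference entries further down (compute_depthwise_convolution_shape, ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS), the branch for an already-configured output compares its shape against the computed depthwise shape. As a worked example of that arithmetic only, with made-up sizes and the library calls left out:

#include <cstdio>

int main()
{
    // Made-up sizes: 32x32 spatial input with 16 channels, 3x3 kernel, stride 1,
    // padding 1 on each side, dilation 1, depth_multiplier 2.
    const int in_h = 32, in_w = 32, in_c = 16;
    const int k_h = 3, k_w = 3, pad = 1, stride = 1, depth_multiplier = 2;

    const int out_h = (in_h + 2 * pad - k_h) / stride + 1; // 32
    const int out_w = (in_w + 2 * pad - k_w) / stride + 1; // 32
    const int out_c = in_c * depth_multiplier;             // 32

    std::printf("depthwise output: %d x %d x %d (H x W x C)\n", out_h, out_w, out_c);
    return 0;
}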
    const UniformQuantizationInfo iq_info = input->quantization_info().uniform();
    const UniformQuantizationInfo wq_info = weights->quantization_info().uniform();
    const UniformQuantizationInfo oq_info = (output->total_size() != 0) ? output->quantization_info().uniform() : iq_info;

    float multiplier        = iq_info.scale * wq_info.scale / oq_info.scale;
    int   output_multiplier = 0;
    int   output_shift      = 0;
    ARM_COMPUTE_RETURN_ON_ERROR(quantization::calculate_quantized_multiplier(multiplier, &output_multiplier, &output_shift));
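The float requantization scale computed above has to be applied with integer arithmetic on the device, which is what quantization::calculate_quantized_multiplier prepares. A minimal sketch of that fixed-point decomposition, for illustration only (the library routine adds range checks, a rounding policy and an optional epsilon tolerance):

#include <cmath>
#include <cstdint>

// Decompose multiplier ~= quant_multiplier * 2^(-31 - shift), with quant_multiplier in [2^30, 2^31).
void decompose_multiplier(float multiplier, int32_t *quant_multiplier, int32_t *shift)
{
    int         exponent = 0;
    const float mantissa = std::frexp(multiplier, &exponent); // multiplier = mantissa * 2^exponent, mantissa in [0.5, 1)
    *shift = -exponent;

    int64_t q = static_cast<int64_t>(std::llround(static_cast<double>(mantissa) * (1ll << 31)));
    if (q == (1ll << 31)) // mantissa rounded up to 1.0: renormalise
    {
        q /= 2;
        --(*shift);
    }
    *quant_multiplier = static_cast<int32_t>(q);
}
// Example: multiplier = 0.0015f  ->  quant_multiplier ~= 1649267442, shift = 9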
CLDepthwiseConvolutionLayerNativeKernel::CLDepthwiseConvolutionLayerNativeKernel()
    : ...,
      _depth_multiplier(1),
      _output_multipliers(nullptr),
      _output_shifts(nullptr),
      _export_input_to_cl_image(false),
      _export_weights_to_cl_image(false),
      ...

    // ... (tail of the forwarding configure() overload)
    ... output_multipliers, output_shifts);
    if (output == nullptr)
    {
        // In-place computation: reuse the input tensor as the output
        output = input;
    }

    ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(
        input->info(), weights->info(), (biases != nullptr) ? biases->info() : nullptr, output->info(), dwc_info,
        conv_info, (output_multipliers != nullptr) ? output_multipliers->info() : nullptr,
        (output_shifts != nullptr) ? output_shifts->info() : nullptr));
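The same checks are available to callers through the kernel's static validate() entry point (its full signature appears in the reference entries below). A minimal usage sketch, assuming input_info, weights_info, biases_info and output_info are ITensorInfo pointers prepared elsewhere, dwc_info and conv_info are filled-in descriptors, and namespace qualifiers are omitted:

#include <iostream>

// Sketch only: query whether a configuration is supported before calling configure().
const Status status = CLDepthwiseConvolutionLayerNativeKernel::validate(input_info, weights_info, biases_info,
                                                                        output_info, dwc_info, conv_info);
if (!bool(status))
{
    std::cerr << "Unsupported depthwise configuration: " << status.error_description() << std::endl;
}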
    _depth_multiplier   = conv_info.depth_multiplier;
    _output_multipliers = output_multipliers;
    _output_shifts      = output_shifts;
    const unsigned int m0 = std::min(dwc_info.m0, (unsigned int)output->info()->dimension(1));
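m0 (rows processed per thread, see the dwc_info entries below) is clamped so a tile never extends past the output height; the column count n0 is adjusted along similar lines by adjust_vec_size. A small illustration with made-up numbers:

#include <algorithm>
#include <cstdio>

int main()
{
    // Made-up values: the heuristic suggests an 8-row tile, but the output is only 5 rows tall.
    const unsigned int suggested_m0 = 8;
    const unsigned int out_height   = 5; // stands in for output->info()->dimension(1)
    const unsigned int m0           = std::min(suggested_m0, out_height);
    std::printf("tile height m0 = %u\n", m0); // prints 5
    return 0;
}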
    if (_export_input_to_cl_image)
        update_padding_for_cl_image(input->info());
    if (_export_weights_to_cl_image)
        update_padding_for_cl_image(weights->info());
    const auto act_function = conv_info.act_info.activation();

    if (... &&
        (act_function == ActivationLayerInfo::ActivationFunction::BOUNDED_RELU ||
         act_function == ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU) &&
        ...)
    {
        // -cl-fast-relaxed-math implies -cl-finite-math-only, which can misbehave with the
        // bounded activations above, so only the unsafe-math optimization is requested here.
        build_opts.add_option("-cl-unsafe-math-optimizations");
    }
    else
    {
        build_opts.add_option("-cl-fast-relaxed-math");
    }
    build_opts.add_option_if_else(_export_input_to_cl_image, "-DSRC_TENSOR_TYPE=IMAGE", "-DSRC_TENSOR_TYPE=BUFFER");
    build_opts.add_option("-DDST_TENSOR_TYPE=BUFFER");
    build_opts.add_option_if_else(_export_weights_to_cl_image, "-DWEI_TENSOR_TYPE=IMAGE", "-DWEI_TENSOR_TYPE=BUFFER");
    if (biases != nullptr)
    {
        build_opts.add_option(std::string("-DHAS_BIAS"));
    }
        zero_value.get(zero_value_s32);

        int output_multiplier = 0;
        int output_shift      = 0;
        build_opts.add_option("-DDST_MULTIPLIERS_DATA_TYPE=" + get_cl_type_from_data_type(...));
        build_opts.add_option("-DDST_SHIFTS_DATA_TYPE=" + get_cl_type_from_data_type(...));
        build_opts.add_option_if_else(...,
                                      "-DQUANTIZATION_TYPE=PER_CHANNEL", "-DQUANTIZATION_TYPE=PER_TENSOR");
        std::tie(b_val, a_val) = get_quantized_activation_min_max(...);
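b_val and a_val are the quantized clamp bounds derived from the activation via get_quantized_activation_min_max (listed below). A worked example with made-up quantization parameters, not the library routine itself:

#include <cmath>
#include <cstdint>
#include <cstdio>
#include <utility>

// For a BOUNDED_RELU activation the float range [0, upper] is mapped into the
// output's quantized domain: q(x) = round(x / scale) + offset.
std::pair<int32_t, int32_t> bounded_relu_limits(float upper, float scale, int32_t offset)
{
    const int32_t q_min = static_cast<int32_t>(std::lround(0.0f / scale)) + offset;
    const int32_t q_max = static_cast<int32_t>(std::lround(upper / scale)) + offset;
    return {q_min, q_max};
}

int main()
{
    // Made-up QASYMM8 output parameters: scale = 0.1, offset = 10, upper bound a = 6.0.
    const auto limits = bounded_relu_limits(6.0f, 0.1f, 10);
    std::printf("clamp to [%d, %d]\n", limits.first, limits.second); // clamp to [10, 70]
    return 0;
}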
    ICLKernel::configure_internal(win);
    cl::Image2D input_cl_image;
    cl::Image2D weights_cl_image;

    if (_export_input_to_cl_image || _export_weights_to_cl_image)
    {
        // Export cl_buffer to cl_image
        if (_export_input_to_cl_image)
        {
            const size_t image_h = ...;
            ...
        }
        if (_export_weights_to_cl_image)
        {
            const size_t image_h = ...;
            ...
        }
    }

    unsigned int idx = 0;
    if (_export_input_to_cl_image)   { _kernel.setArg(idx++, input_cl_image); }
    ...
    if (_export_weights_to_cl_image) { _kernel.setArg(idx++, weights_cl_image); }
    ...
    if (_biases != nullptr) { ... }
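When a tensor is exported to a texture, its existing OpenCL buffer is wrapped in a cl::Image2D via create_image2d_from_buffer (signature in the entries below). A sketch of that call with assumed values; tensor, image_h, kernel and arg_idx are placeholders, and the shape arithmetic (four elements per texel) is illustrative rather than copied from run():

// Sketch only; 'tensor' is an ICLTensor*, 'image_h' a precomputed image height in rows.
const size_t      image_w   = tensor->info()->dimension(0) / 4; // four elements packed per texel
const size_t      row_pitch = tensor->info()->strides_in_bytes()[1];
const TensorShape shape2d(image_w, image_h);

cl::Image2D image = create_image2d_from_buffer(CLKernelLibrary::get().context(), tensor->cl_buffer(), shape2d,
                                               tensor->info()->data_type(), row_pitch, CLImage2DType::ReadOnly);
kernel.setArg(arg_idx++, image);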
Class to describe a number of elements in each dimension.
std::string to_string(T &&value)
Convert integer and float values to string.
@ QSYMM8_PER_CHANNEL
quantized, symmetric per channel fixed-point 8-bit number
void add_option_if_else(bool cond, std::string option_true, std::string option_false)
Adds the first option if the condition is true, otherwise the second one.
void add_1D_tensor_argument(unsigned int &idx, const ICLTensor *tensor, const Window &window)
Add the passed 1D tensor's parameters to the object's kernel's arguments starting from the index idx.
bool export_to_cl_image(const ITensorInfo *tensor)
Class describing the value of a pixel for any image format.
const StringSet & options() const
Gets the current options list set.
Window calculate_max_window(const ValidRegion &valid_region, const Steps &steps, bool skip_border, BorderSize border_size)
TensorShape compute_depthwise_convolution_shape(const ITensorInfo &input, const ITensorInfo &weights, const ConvolutionInfo &info)
Calculate the depthwise convolution output shape of a tensor.
@ NHWC
Num samples, height, width, channels.
unsigned int n0
Number of columns processed by each thread.
@ QASYMM8
quantized, asymmetric fixed-point 8-bit number unsigned
void get(uint8_t &v) const
Interpret the pixel value as a U8.
std::string lower_string(const std::string &val)
Convert a given string to lower case.
Status validate_arguments(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *dst, const PadStrideInfo &conv_info)
#define ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(k)
std::pair< int32_t, int32_t > get_quantized_activation_min_max(const ActivationLayerInfo &act_info, DataType data_type, UniformQuantizationInfo oq_info)
Returns a pair of minimum and maximum values for a quantized activation.
@ DEPTHWISE
Depthwise CL kernel type.
Interface for OpenCL tensor.
virtual const cl::Buffer & cl_buffer() const =0
Interface to be implemented by the child class to return a reference to the OpenCL buffer containing ...
const std::string & string_from_data_type(DataType dt)
Convert a data type identity into a string.
GPUTarget get_target() const
Get the targeted GPU architecture.
unsigned int m0
Number of rows processed by each thread.
static CLKernelLibrary & get()
Access the KernelLibrary singleton.
#define ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(...)
#define ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(t, c,...)
Manages all the OpenCL kernels compilation and caching, provides accessors for the OpenCL Context.
#define ARM_COMPUTE_RETURN_ON_ERROR(status)
Checks if a status contains an error and returns it.
virtual size_t dimension(size_t index) const =0
Return the size of the requested dimension.
#define ARM_COMPUTE_ERROR_ON_NULLPTR(...)
virtual ITensorInfo * info() const =0
Interface to be implemented by the child class to return the tensor's metadata.
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
#define ARM_COMPUTE_ERROR_THROW_ON(status)
void run(const Window &window, cl::CommandQueue &queue) override
Enqueue the OpenCL kernel to process the given window on the passed OpenCL command queue.
void add_option(std::string option)
Adds option to the existing build option list.
Window collapse(const Window &full_window, size_t first, size_t last=Coordinates::num_max_dimensions) const
Collapse the dimensions between first and last.
#define ARM_COMPUTE_RETURN_ERROR_ON(cond)
If the condition is true, an error is returned.
bool auto_init_if_empty(ITensorInfo &info, const TensorShape &shape, int num_channels, DataType data_type, QuantizationInfo quantization_info=QuantizationInfo())
Auto initialize the tensor info (shape, number of channels and data type) if the current assignment i...
#define ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(tensor)
cl::Kernel create_kernel(const CLCompileContext &ctx, const std::string &kernel_name, const std::set< std::string > &build_opts=std::set< std::string >())
Creates an opencl kernel using a compile context.
@ QASYMM8_SIGNED
quantized, asymmetric fixed-point 8-bit number signed
void add_option_if(bool cond, std::string option)
Adds option if a given condition is true.
#define ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(f, s)
std::string float_to_string_with_full_precision(float val)
Create a string with the float in full precision.
virtual DataType data_type() const =0
Data type used for each element of the tensor.
bool is_data_type_quantized_per_channel(DataType dt)
Check if a given data type is of per channel type.
static Status validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const DWCComputeKernelInfo &dwc_info, const ConvolutionInfo &conv_info, const ITensorInfo *output_multipliers=nullptr, const ITensorInfo *output_shifts=nullptr)
Static function to check if given info will lead to a valid configuration of CLDepthwiseConvolutionLa...
UniformQuantizationInfo uniform() const
Return per layer quantization info.
bool export_input_to_cl_image
Export input to cl_image.
#define ARM_COMPUTE_UNUSED(...)
To avoid unused variables warnings.
bool export_weights_to_cl_image
Export the weights to cl_image.
#define ARM_COMPUTE_RETURN_ERROR_ON_DATA_LAYOUT_NOT_IN(t,...)
void update_padding_for_cl_image(ITensorInfo *tensor)
Update padding required to export the OpenCL buffer to OpenCL image2d.
virtual QuantizationInfo quantization_info() const =0
Get the quantization settings (scale and offset) of the tensor.
const Window & window() const
The maximum window the kernel can be executed on.
std::string get_cl_type_from_data_type(const DataType &dt)
Translates a tensor data type to the appropriate OpenCL type.
GPUTarget
Available GPU Targets.
size_t get_data_layout_dimension_index(const DataLayout &data_layout, const DataLayoutDimension &data_layout_dimension)
Get the index of the given dimension.
void add_4d_tensor_nhwc_argument(unsigned int &idx, const ICLTensor *tensor)
Add the passed NHWC 4D tensor's parameters to the object's kernel's arguments by passing strides,...
CLDepthwiseConvolutionLayerNativeKernel()
Default Constructor.
Describe a multidimensional execution window.
cl::Image2D create_image2d_from_buffer(const cl::Context &ctx, const cl::Buffer &buffer, const TensorShape &shape2d, DataType data_type, size_t image_row_pitch, CLImage2DType image_type)
Create a cl::Image2D object from an OpenCL buffer.
#define ARM_COMPUTE_RETURN_ERROR_ON_MSG(cond, msg)
If the condition is true, an error is returned.
Copyright (c) 2017-2023 Arm Limited.
#define ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(...)
@ F16
16-bit floating-point number
unsigned int adjust_vec_size(unsigned int vec_size, size_t dim0)
Returns the adjusted vector size in case it is less than the input's first dimension,...
bool has_padding_changed(const std::unordered_map< const ITensorInfo *, PaddingSize > &padding_map)
Check if the previously stored padding info has changed after configuring a kernel.
static constexpr size_t DimZ
Alias for dimension 2 also known as Z dimension.
Compute descriptor used by the depthwise convolution native kernel.
@ S32
signed 32-bit number
virtual const Strides & strides_in_bytes() const =0
The strides in bytes for accessing each dimension of the tensor.
cl::NDRange lws_hint() const
Return the Local-Workgroup-Size hint.
#define ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(...)
Status calculate_quantized_multiplier(float multiplier, int32_t *quant_multiplier, int32_t *shift, bool ignore_epsilon=false)
Calculate quantized representation of multiplier.
const std::string & string_from_activation_func(const ActivationFunction &act)
Translates a given activation function to a string.
bool is_data_type_quantized(DataType dt)
Check if a given data type is of quantized type.
Store the tensor's metadata.
Window first_slice_window_4D() const
First 4D slice of the window.
@ F32
32-bit floating-point number
ScaleKernelInfo info(interpolation_policy, default_border_mode, PixelValue(), sampling_policy, false)
void configure(const CLCompileContext &compile_context, ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const DWCComputeKernelInfo &dwc_info, const ConvolutionInfo &conv_info, const ICLTensor *output_multipliers=nullptr, const ICLTensor *output_shifts=nullptr)
Initialize the function's source, destination and parameters.
std::unordered_map< const ITensorInfo *, PaddingSize > get_padding_info(std::initializer_list< const ITensorInfo * > infos)
Stores padding information before configuring a kernel.
SimpleTensor< T > slice(const SimpleTensor< T > &src, Coordinates starts, Coordinates ends)
void set_unroll_with_pragma(CLBuildOptions &built_opts, std::initializer_list< int > values)
virtual size_t num_dimensions() const =0
The number of dimensions of the tensor (rank)
void enqueue(cl::CommandQueue &queue, ICLKernel &kernel, const Window &window, const cl::NDRange &lws_hint=CLKernelLibrary::get().default_ndrange(), bool use_dummy_work_items=false)
Add the kernel to the command queue with the given window.