Compute Library
 22.05
ClGemmLowpOffsetContributionOutputStageKernel.cpp
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2018-2021 Arm Limited.
3  *
4  * SPDX-License-Identifier: MIT
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a copy
7  * of this software and associated documentation files (the "Software"), to
8  * deal in the Software without restriction, including without limitation the
9  * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10  * sell copies of the Software, and to permit persons to whom the Software is
11  * furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in all
14  * copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22  * SOFTWARE.
23  */
25 
30 #include "arm_compute/core/Utils.h"
32 
35 
36 #include "support/Cast.h"
37 #include "support/StringSupport.h"
38 
39 namespace arm_compute
40 {
41 namespace opencl
42 {
43 namespace kernels
44 {
45 namespace
46 {
// Validate the tensor metadata for the GEMMLowp offset-contribution +
// output-stage kernel.
//
// Checks, in order: the optional bias vector, the per-channel
// multiplier/shift vectors, vector_sum_col (only needed when a_offset != 0),
// vector_sum_row (only needed when b_offset != 0) including the
// 3D-reinterpretation and batch-count consistency rules, the already-configured
// dst (if any), and finally the output-stage clamp bounds.
//
// Returns an empty Status on success; the first failed check returns an error
// Status via the ARM_COMPUTE_RETURN_* macros.
//
// NOTE(review): this doxygen extraction elides a few original lines between
// the visible statements (e.g. the data-type / nullptr checks at original
// lines 50, 54, 59, 61, 72, 79, 116-117) — keep the visible statement order
// unchanged and confirm the elided checks against the upstream source.
Status validate_arguments(const ITensorInfo *mm_result, const ITensorInfo *vector_sum_col, const ITensorInfo *vector_sum_row, const ITensorInfo *bias, const ITensorInfo *dst,
                          int32_t a_offset, int32_t b_offset, const GEMMLowpOutputStageInfo &output_stage, const ITensorInfo *output_multipliers, const ITensorInfo *output_shifts)
{

    // Optional bias: must be a 1D vector whose length matches dim 0 of mm_result.
    if(bias != nullptr)
    {
        ARM_COMPUTE_RETURN_ERROR_ON(bias->num_dimensions() > 1);
        ARM_COMPUTE_RETURN_ERROR_ON(mm_result->dimension(0) != bias->dimension(0));
    }

    // Requantization vectors are 1D; with per-channel quantization they must
    // provide one multiplier/shift per output channel (dim 0 of mm_result).
    ARM_COMPUTE_RETURN_ERROR_ON(output_multipliers->num_dimensions() > 1);
    ARM_COMPUTE_RETURN_ERROR_ON(output_shifts->num_dimensions() > 1);
    if(output_stage.is_quantized_per_channel)
    {
        ARM_COMPUTE_RETURN_ERROR_ON(mm_result->dimension(0) != output_shifts->dimension(0));
        ARM_COMPUTE_RETURN_ERROR_ON(mm_result->dimension(0) != output_multipliers->dimension(0));
    }

    // If a_offset == 0, vector_sum_col can be a nullptr
    if(a_offset != 0)
    {
        ARM_COMPUTE_RETURN_ERROR_ON(vector_sum_col->dimension(0) != mm_result->dimension(0));
    }

    // If b_offset == 0, vector_sum_row can be a nullptr
    if(b_offset != 0)
    {

        // Check if input is a 3D reinterpretation: mm_result's Y differing from
        // vector_sum_row's X means the 2D result actually encodes H x D slices.
        const bool reinterpret_as_3d = mm_result->num_dimensions() > 1 && mm_result->tensor_shape().y() != vector_sum_row->tensor_shape().x();

        // Validate input: row sums must cover H*D (3D case) or H (2D case).
        ARM_COMPUTE_RETURN_ERROR_ON(reinterpret_as_3d && vector_sum_row->dimension(0) != (mm_result->dimension(1) * mm_result->dimension(2)));
        ARM_COMPUTE_RETURN_ERROR_ON(!reinterpret_as_3d && vector_sum_row->dimension(0) != mm_result->dimension(1));

        TensorShape output_shape = mm_result->tensor_shape();
        if(output_shape.num_dimensions() > 1)
        {
            // When reinterpreted as 3D, the batch dimension shifts from index 2 to 3.
            const unsigned int output_batch_idx = reinterpret_as_3d ? 3 : 2;

            // Collapse everything above the first dimension so batches can be
            // compared as a single count regardless of original rank.
            TensorShape vector_sum_row_shape = vector_sum_row->tensor_shape();
            vector_sum_row_shape.collapse_from(1);
            output_shape.collapse_from(output_batch_idx);

            ARM_COMPUTE_RETURN_ERROR_ON_MSG(vector_sum_row_shape[1] != output_shape[output_batch_idx],
                                            "mm_result tensor must have the same number of batches of output tensor");

            if(a_offset != 0)
            {
                TensorShape vector_sum_col_shape = vector_sum_col->tensor_shape();
                vector_sum_col_shape.collapse_from(1);

                // vector_sum_col may be broadcast (single batch) or must match
                // vector_sum_row's batch count exactly.
                ARM_COMPUTE_RETURN_ERROR_ON_MSG(vector_sum_col_shape[1] != 1 && vector_sum_col_shape[1] != vector_sum_row_shape[1],
                                                "vector_sum_col tensor must have the same number of batches of vector_sum_row_shape or the number of batches must be set to 1");
            }
        }
    }

    // Checks performed when output is configured
    if((dst != nullptr) && (dst->total_size() != 0))
    {
        // dst must carry the data type the output stage was configured to produce.
        ARM_COMPUTE_RETURN_ERROR_ON(output_stage.output_data_type != dst->data_type());
    }

    // Clamp range must be well-formed, and per-channel vectors must be paired.
    ARM_COMPUTE_RETURN_ERROR_ON(output_stage.gemmlowp_min_bound > output_stage.gemmlowp_max_bound);
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(output_stage.gemmlowp_multipliers.size() != output_stage.gemmlowp_shifts.size(), "per channel quantization info is incorrect");

    return Status{};
}
125 } // namespace
126 
{
    // NOTE(review): the doxygen extraction elides this constructor's signature
    // and body lines (original lines 127, 129); only the braces are visible
    // here — confirm any member initialization (e.g. the kernel-type member)
    // against the upstream source.
}
131 
// ClGemmLowpOffsetContributionOutputStageKernel::configure — NOTE(review): the
// extraction elides the function-name line (original line 132, which also
// carries the leading CLCompileContext parameter; see the configure()
// declaration in the class reference). Only the remaining parameter list and
// the body are visible below.
//
// Validates the arguments, auto-initializes dst, assembles the OpenCL
// compile-time options (-D defines) from the offsets and output-stage info,
// builds the kernel, and configures the execution window and tuning id.
    const ITensorInfo *mm_result, const ITensorInfo *vector_sum_col, const ITensorInfo *vector_sum_row, const ITensorInfo *bias, ITensorInfo *dst,
    int32_t k, int32_t a_offset, int32_t b_offset, const GEMMLowpOutputStageInfo &output_stage,
    const ITensorInfo *output_multipliers, const ITensorInfo *output_shifts)
{
    // Perform validate step
    ARM_COMPUTE_ERROR_ON_NULLPTR(mm_result, dst, output_multipliers, output_shifts);
    ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(mm_result, vector_sum_col, vector_sum_row, bias, dst, a_offset, b_offset, output_stage, output_multipliers, output_shifts));

    // Snapshot padding so any change introduced while configuring can be
    // detected afterwards (the matching has_padding_changed() assert at
    // original line 205 is elided in this extraction).
    auto padding_info = get_padding_info({ mm_result, vector_sum_col, vector_sum_row, bias, dst, output_multipliers, output_shifts });

    // Saturation bounds applied after requantization.
    const int min = output_stage.gemmlowp_min_bound;
    const int max = output_stage.gemmlowp_max_bound;

    _is_quantized_per_channel = output_stage.is_quantized_per_channel;

    // Check if input is a 3D reinterpretation: mm_result's Y differing from
    // vector_sum_row's X means the 2D result actually encodes H x D slices.
    const bool reinterpret_as_3d = vector_sum_row != nullptr
                                   && mm_result->num_dimensions() > 1
                                   && mm_result->tensor_shape().y() != vector_sum_row->tensor_shape().x();

    // Auto initialize the output
    auto_init_if_empty(*dst, mm_result->clone()->set_data_type(output_stage.output_data_type));

    // Vector width: 4, adjusted down if dim 0 is smaller.
    const unsigned int num_elems_processed_per_iteration = adjust_vec_size(4, mm_result->dimension(0));

    // Set the arguments to pass at compile time
    CLBuildOptions build_opts;
    build_opts.add_option("-DVEC_SIZE=" + support::cpp11::to_string(num_elems_processed_per_iteration));
    build_opts.add_option("-DVEC_SIZE_LEFTOVER=" + support::cpp11::to_string(mm_result->dimension(0) % num_elems_processed_per_iteration));

    // If a_offset == 0, vector_sum_col can be a nullptr
    if(a_offset != 0)
    {
        build_opts.add_option("-DA_OFFSET=" + support::cpp11::to_string(a_offset));
        build_opts.add_option_if(vector_sum_col->tensor_shape().num_dimensions() > 1, "-DSUM_COL_HAS_BATCHES");
    }
    // If b_offset == 0, vector_sum_row can be a nullptr
    build_opts.add_option_if(b_offset != 0, "-DB_OFFSET=" + support::cpp11::to_string(b_offset));
    // Constant a_offset * b_offset * k contribution term, folded at compile time.
    build_opts.add_option("-DK_OFFSET=" + support::cpp11::to_string(a_offset * b_offset * k));
    build_opts.add_option_if(reinterpret_as_3d, "-DHEIGHT_INPUT3D=" + support::cpp11::to_string(mm_result->dimension(1)));
    build_opts.add_option_if(reinterpret_as_3d, "-DDEPTH_INPUT3D=" + support::cpp11::to_string(mm_result->dimension(2)));
    build_opts.add_option_if(bias != nullptr, "-DADD_BIAS");
    build_opts.add_option("-DRESULT_OFFSET=" + support::cpp11::to_string(output_stage.gemmlowp_offset));
    // With per-channel quantization, element [0] here is presumably superseded
    // by the run-time multiplier/shift vectors — confirm against the CL kernel.
    build_opts.add_option("-DRESULT_MULTIPLIER=" + support::cpp11::to_string(output_stage.gemmlowp_multipliers[0]));
    build_opts.add_option("-DRESULT_SHIFT=" + support::cpp11::to_string(output_stage.gemmlowp_shifts[0]));
    build_opts.add_option_if(_is_quantized_per_channel, "-DPER_CHANNEL_QUANTIZATION");
    build_opts.add_option("-DOUTPUT_DATA_TYPE=" + get_cl_type_from_data_type(dst->data_type()));

    // Only emit clamp bounds that actually tighten the output type's natural range.
    PixelValue min_val{};
    PixelValue max_val{};
    std::tie(min_val, max_val) = get_min_max(dst->data_type());
    build_opts.add_option_if((min > min_val.get<int32_t>()), "-DMIN_BOUND=" + support::cpp11::to_string(min));
    build_opts.add_option_if((max < max_val.get<int32_t>()), "-DMAX_BOUND=" + support::cpp11::to_string(max));

    // The kernel variant is selected by the requested output-stage type.
    std::string kernel_name("gemmlowp_offset_contribution");
    kernel_name += "_" + string_from_gemmlowp_output_stage(output_stage.type);

    // Create kernel
    _kernel = create_kernel(compile_context, kernel_name, build_opts.options());

    // Configure kernel window
    Window win = calculate_max_window(*mm_result, Steps(num_elems_processed_per_iteration));
    ICLKernel::configure_internal(win);

    // Set config_id for enabling LWS tuning
    _config_id = kernel_name + "_";
    _config_id += support::cpp11::to_string(mm_result->dimension(0));
    _config_id += "_";
    _config_id += support::cpp11::to_string(mm_result->dimension(1));
    _config_id += "_";
    _config_id += support::cpp11::to_string(mm_result->dimension(2));

}
207 
208 Status ClGemmLowpOffsetContributionOutputStageKernel::validate(const ITensorInfo *mm_result, const ITensorInfo *vector_sum_col, const ITensorInfo *vector_sum_row, const ITensorInfo *bias,
209  const ITensorInfo *dst, int32_t a_offset, int32_t b_offset, const GEMMLowpOutputStageInfo &output_stage,
210  const ITensorInfo *output_multipliers, const ITensorInfo *output_shifts)
211 {
212  ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(mm_result, vector_sum_col, vector_sum_row, bias, dst, a_offset, b_offset, output_stage, output_multipliers, output_shifts));
213  return Status{};
214 }
215 
217 {
220 
221  const auto mm_result = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC));
222  const auto bias = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_BIAS));
223  const auto vector_sum_col = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_VEC_COL_SUM));
224  const auto vector_sum_row = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_VEC_ROW_SUM));
225  const auto output_shifts = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SHIFTS));
226  const auto output_multipliers = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_MULTIPLIERS));
227  auto dst = utils::cast::polymorphic_downcast<ICLTensor *>(tensors.get_tensor(TensorType::ACL_DST));
228 
230  Window slice = collapsed.first_slice_window_3D();
231 
232  // Set window for vector_sum_col
233  Window win_vector_sum_col = slice;
234  win_vector_sum_col.set(Window::DimY, Window::Dimension(0, 0, 0));
235  win_vector_sum_col.set(Window::DimZ, Window::Dimension(0, 0, 0));
236 
237  // Set window for vector_sum_row
238  Window win_vector_sum_row = slice;
239  win_vector_sum_row.set(Window::DimX, Window::Dimension(0, 0, 0));
240  win_vector_sum_row.set(Window::DimY, Window::Dimension(0, 0, 0));
241  win_vector_sum_col.set(Window::DimZ, Window::Dimension(0, 0, 0));
242 
243  Window biases_slice = slice;
244  biases_slice.set(Window::DimY, Window::Dimension(0, 1, 1));
245  biases_slice.set(Window::DimZ, Window::Dimension(0, 1, 1));
246 
247  do
248  {
249  unsigned int idx = 0;
250  add_3D_tensor_argument(idx, mm_result, slice);
251  add_2D_tensor_argument_if((vector_sum_col != nullptr), idx, vector_sum_col, win_vector_sum_col);
252  add_2D_tensor_argument_if((vector_sum_row != nullptr), idx, vector_sum_row, win_vector_sum_row);
253  add_1D_tensor_argument_if((bias != nullptr), idx, bias, biases_slice);
254  add_3D_tensor_argument(idx, dst, slice);
255  add_1D_tensor_argument_if(_is_quantized_per_channel, idx, output_multipliers, biases_slice);
256  add_1D_tensor_argument_if(_is_quantized_per_channel, idx, output_shifts, biases_slice);
257  enqueue(queue, *this, slice, lws_hint());
258  }
259  while(collapsed.slide_window_slice_3D(slice));
260 }
261 } // namespace kernels
262 } // namespace opencl
263 } // namespace arm_compute
void add_1D_tensor_argument_if(bool cond, unsigned int &idx, const ICLTensor *tensor, const Window &window)
Add the passed 1D tensor's parameters to the object's kernel's arguments starting from the index idx ...
Definition: ICLKernel.h:190
virtual size_t num_dimensions() const =0
The number of dimensions of the tensor (rank)
Class describing the value of a pixel for any image format.
Definition: PixelValue.h:34
Window calculate_max_window(const ValidRegion &valid_region, const Steps &steps, bool skip_border, BorderSize border_size)
void run_op(ITensorPack &tensors, const Window &window, cl::CommandQueue &queue) override
Enqueue the OpenCL kernel to process the given window on the passed OpenCL command queue...
const Window & window() const
The maximum window the kernel can be executed on.
Definition: IKernel.cpp:28
void add_2D_tensor_argument_if(bool cond, unsigned int &idx, const ICLTensor *tensor, const Window &window)
Add the passed 2D tensor's parameters to the object's kernel's arguments starting from the index idx ...
Definition: ICLKernel.h:214
virtual size_t dimension(size_t index) const =0
Return the size of the requested dimension.
void enqueue(cl::CommandQueue &queue, ICLKernel &kernel, const Window &window, const cl::NDRange &lws_hint=CLKernelLibrary::get().default_ndrange(), bool use_dummy_work_items=false)
Add the kernel to the command queue with the given window.
Definition: ICLKernel.cpp:32
cl::NDRange lws_hint() const
Return the Local-Workgroup-Size hint.
Definition: ICLKernel.h:384
#define ARM_COMPUTE_RETURN_ON_ERROR(status)
Checks if a status contains an error and returns it.
Definition: Error.h:204
std::string to_string(T &&value)
Convert integer and float values to string.
virtual DataType data_type() const =0
Data type used for each element of the tensor.
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
Definition: Error.h:466
Store the tensor's metadata.
Definition: ITensorInfo.h:40
#define ARM_COMPUTE_ERROR_THROW_ON(status)
Definition: Error.h:455
Describe one of the image&#39;s dimensions with a start, end and step.
Definition: Window.h:79
int32_t gemmlowp_offset
GEMMLowp output stage offset used for quantizing to QASYMM8.
Definition: Types.h:2041
Status class.
Definition: Error.h:52
const std::string & string_from_gemmlowp_output_stage(GEMMLowpOutputStageType output_stage)
Translates a given GEMMLowp output stage to a string.
Definition: Utils.cpp:260
int32_t gemmlowp_max_bound
GEMMLowp max value used to saturate down the output result before converting back to QASYMM8...
Definition: Types.h:2045
Status validate_arguments(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *dst, const PadStrideInfo &conv_info)
#define ARM_COMPUTE_RETURN_ERROR_ON(cond)
If the condition is true, an error is returned.
Definition: Error.h:296
GEMMLowpOutputStageType type
GEMMLowp output stage type.
Definition: Types.h:2040
void add_3D_tensor_argument(unsigned int &idx, const ICLTensor *tensor, const Window &window)
Add the passed 3D tensor's parameters to the object's kernel's arguments starting from the index idx...
Definition: ICLKernel.h:227
Copyright (c) 2017-2022 Arm Limited.
bool is_quantized_per_channel
GEMMLowp quantized per-channel flag.
Definition: Types.h:2049
std::vector< int32_t > gemmlowp_shifts
GEMMLowp output stage shift used for quantizing to QASYMM8.
Definition: Types.h:2047
1 channel, 1 S32 per channel
void add_option(std::string option)
Adds option to the existing build option list.
T x() const
Alias to access the size of the first dimension.
Definition: Dimensions.h:87
const OutputStage & output_stage
const ITensor * get_const_tensor(int id) const
Get constant tensor of a given id.
Definition: ITensorPack.cpp:54
cl::Kernel create_kernel(const CLCompileContext &ctx, const std::string &kernel_name, const std::set< std::string > &build_opts=std::set< std::string >())
Creates an opencl kernel using a compile context.
Definition: CLHelpers.cpp:391
static constexpr size_t DimX
Alias for dimension 0 also known as X dimension.
Definition: Window.h:43
Window collapse_if_possible(const Window &full_window, size_t first, size_t last, bool *has_collapsed=nullptr) const
Collapse the dimensions between first and last if possible.
Definition: Window.inl:68
virtual const TensorShape & tensor_shape() const =0
Size for each dimension of the tensor.
quantized, asymmetric fixed-point 8-bit number unsigned
Class to describe a number of elements in each dimension.
Definition: Steps.h:40
unsigned int num_elems_processed_per_iteration
std::vector< int32_t > gemmlowp_multipliers
GEMMLowp output stage multiplier used for quantizing to QASYMM8.
Definition: Types.h:2046
std::string get_cl_type_from_data_type(const DataType &dt)
Translates a tensor data type to the appropriate OpenCL type.
Definition: CLHelpers.cpp:39
bool auto_init_if_empty(ITensorInfo &info, const TensorShape &shape, int num_channels, DataType data_type, QuantizationInfo quantization_info=QuantizationInfo())
Auto initialize the tensor info (shape, number of channels and data type) if the current assignment i...
virtual std::unique_ptr< T > clone() const =0
Provide a clone of the current object of class T.
GEMMLowp output stage info.
Definition: Types.h:2038
void set(size_t dimension, const Dimension &dim)
Set the values of a given dimension.
Definition: Window.inl:49
void configure(const CLCompileContext &compile_context, const ITensorInfo *mm_result, const ITensorInfo *vector_sum_col, const ITensorInfo *vector_sum_row, const ITensorInfo *bias, ITensorInfo *dst, int32_t k, int32_t a_offset, int32_t b_offset, const GEMMLowpOutputStageInfo &output_stage, const ITensorInfo *output_multipliers, const ITensorInfo *output_shifts)
Initialise the kernel&#39;s input and output.
Elementeise CL kernel type.
Definition: CLTypes.h:84
#define ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(k)
Definition: Validate.h:915
bool has_padding_changed(const std::unordered_map< const ITensorInfo *, PaddingSize > &padding_map)
Check if the previously stored padding info has changed after configuring a kernel.
Definition: Utils.cpp:601
CLCompileContext class.
static constexpr size_t DimY
Alias for dimension 1 also known as Y dimension.
Definition: Window.h:45
ITensor * get_tensor(int id)
Get tensor of a given id from the pack.
Definition: ITensorPack.cpp:64
#define ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(...)
Definition: Validate.h:439
static constexpr size_t DimZ
Alias for dimension 2 also known as Z dimension.
Definition: Window.h:47
static Status validate(const ITensorInfo *mm_result, const ITensorInfo *vector_sum_col, const ITensorInfo *vector_sum_row, const ITensorInfo *bias, const ITensorInfo *dst, int32_t a_offset, int32_t b_offset, const GEMMLowpOutputStageInfo &output_stage, const ITensorInfo *output_multipliers, const ITensorInfo *output_shifts)
Static function to check if given info will lead to a valid configuration.
unsigned int num_dimensions() const
Returns the effective dimensionality of the tensor.
Definition: Dimensions.h:143
#define ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(t, c,...)
Definition: Validate.h:788
std::unordered_map< const ITensorInfo *, PaddingSize > get_padding_info(std::initializer_list< const ITensorInfo *> infos)
Stores padding information before configuring a kernel.
Definition: Utils.cpp:586
#define ARM_COMPUTE_RETURN_ERROR_ON_MSG(cond, msg)
If the condition is true, an error is returned.
Definition: Error.h:244
Tensor packing service.
Definition: ITensorPack.h:39
#define ARM_COMPUTE_ERROR_ON_NULLPTR(...)
Definition: Validate.h:157
unsigned int adjust_vec_size(unsigned int vec_size, size_t dim0)
Returns the adjusted vector size in case it is less than the input&#39;s first dimension, getting rounded down to its closest valid vector size.
Definition: Utils.h:1222
T y() const
Alias to access the size of the second dimension.
Definition: Dimensions.h:92
quantized, asymmetric fixed-point 8-bit number signed
int32_t gemmlowp_min_bound
GEMMLowp min value used to saturate down the output result before converting back to QASYMM8...
Definition: Types.h:2044
DataType output_data_type
Output tensor data type to use if the output is not initialized.
Definition: Types.h:2050
std::string kernel_name
std::tuple< PixelValue, PixelValue > get_min_max(DataType dt)
Compute the mininum and maximum values a data type can take.
Definition: Utils.h:564
Describe a multidimensional execution window.
Definition: Window.h:39
#define ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(f, s)
Definition: Validate.h:201
SimpleTensor< T > slice(const SimpleTensor< T > &src, Coordinates starts, Coordinates ends)
const int32_t * bias