CLPixelWiseMultiplicationKernel.cpp
/*
 * Copyright (c) 2016-2020 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "src/core/CL/kernels/CLPixelWiseMultiplicationKernel.h"

#include "arm_compute/core/CL/CLHelpers.h"
#include "arm_compute/core/CL/CLKernelLibrary.h"
#include "arm_compute/core/CL/ICLTensor.h"
#include "arm_compute/core/TensorInfo.h"
#include "src/core/CL/CLValidate.h"
#include "src/core/helpers/AutoConfiguration.h"
#include "src/core/helpers/WindowHelpers.h"
#include "support/Cast.h"
#include "support/StringSupport.h"

namespace arm_compute
{
namespace
{
constexpr unsigned int num_elems_processed_per_iteration = 16;

Status validate_arguments(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, float scale,
                          ConvertPolicy overflow_policy, RoundingPolicy rounding_policy, const ActivationLayerInfo &act_info)
{
    ARM_COMPUTE_UNUSED(overflow_policy);
    ARM_COMPUTE_UNUSED(rounding_policy);

    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input1, input2, output);
    ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(input1);
    ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(input2);
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input1,
                                                         1,
                                                         DataType::U8, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::S16,
                                                         DataType::QSYMM16, DataType::F16, DataType::F32);
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input2,
                                                         1,
                                                         DataType::U8, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::S16,
                                                         DataType::QSYMM16, DataType::F16, DataType::F32);
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(scale < 0, "Scale cannot be negative.");
    ARM_COMPUTE_RETURN_ERROR_ON(act_info.enabled() && !is_data_type_float(output->data_type()));

    const TensorShape &out_shape = TensorShape::broadcast_shape(input1->tensor_shape(), input2->tensor_shape());

    ARM_COMPUTE_RETURN_ERROR_ON_MSG(out_shape.total_size() == 0, "Inputs are not broadcast compatible");

    // Validate in case of configured output
    if(output->total_size() > 0)
    {
        ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output,
                                                             1,
                                                             DataType::U8, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::S16,
                                                             DataType::QSYMM16, DataType::S32, DataType::F16, DataType::F32);
        ARM_COMPUTE_RETURN_ERROR_ON_MSG(output->data_type() == DataType::U8 && (input1->data_type() != DataType::U8 || input2->data_type() != DataType::U8),
                                        "Output can only be U8 if both inputs are U8");
        ARM_COMPUTE_RETURN_ERROR_ON_MSG(output->data_type() == DataType::QASYMM8 && (input1->data_type() != DataType::QASYMM8 || input2->data_type() != DataType::QASYMM8),
                                        "Output can only be QASYMM8 if both inputs are QASYMM8");
        ARM_COMPUTE_RETURN_ERROR_ON_MSG(output->data_type() == DataType::QASYMM8_SIGNED && (input1->data_type() != DataType::QASYMM8_SIGNED || input2->data_type() != DataType::QASYMM8_SIGNED),
                                        "Output can only be QASYMM8_SIGNED if both inputs are QASYMM8_SIGNED");
        ARM_COMPUTE_RETURN_ERROR_ON_MSG(output->data_type() == DataType::QSYMM16 && (input1->data_type() != DataType::QSYMM16 || input2->data_type() != DataType::QSYMM16),
                                        "Output can only be QSYMM16 if both inputs are QSYMM16");
        ARM_COMPUTE_RETURN_ERROR_ON_MSG(output->data_type() == DataType::S32 && (input1->data_type() != DataType::QSYMM16 || input2->data_type() != DataType::QSYMM16),
                                        "Output can only be S32 if both inputs are QSYMM16");
        ARM_COMPUTE_RETURN_ERROR_ON_MSG(detail::have_different_dimensions(out_shape, output->tensor_shape(), 0), "Wrong shape for output");
    }

    return Status{};
}

std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input1, ITensorInfo *input2, ITensorInfo *output)
{
    const std::pair<TensorShape, ValidRegion> broadcast_pair = ITensorInfo::broadcast_shape_and_valid_region(*input1, *input2);
    const TensorShape &out_shape    = broadcast_pair.first;
    const ValidRegion &valid_region = broadcast_pair.second;

    // Auto initialize output if not initialized
    {
        set_shape_if_empty(*output, out_shape);

        if(input1->data_type() == DataType::S16 || input2->data_type() == DataType::S16)
        {
            set_format_if_unknown(*output, Format::S16);
        }
        else if(input1->data_type() == DataType::F32 || input2->data_type() == DataType::F32)
        {
            set_format_if_unknown(*output, Format::F32);
        }
        else if(input1->data_type() == DataType::QASYMM8)
        {
            set_data_type_if_unknown(*output, DataType::QASYMM8);
        }
        else if(input1->data_type() == DataType::QASYMM8_SIGNED)
        {
            set_data_type_if_unknown(*output, DataType::QASYMM8_SIGNED);
        }
        else if(input1->data_type() == DataType::QSYMM16)
        {
            set_data_type_if_unknown(*output, DataType::QSYMM16);
        }
    }

    Window win = calculate_max_window(valid_region, Steps(num_elems_processed_per_iteration));
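    // Do not step in any dimension where an input has extent 1 (broadcasting): the broadcast
    // input's single element is then re-read across that dimension.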
    Window win_input1 = win.broadcast_if_dimension_le_one(*input1);
    Window win_input2 = win.broadcast_if_dimension_le_one(*input2);

    AccessWindowHorizontal input1_access(input1, 0, num_elems_processed_per_iteration);
    AccessWindowHorizontal input2_access(input2, 0, num_elems_processed_per_iteration);
    AccessWindowHorizontal output_access(output, 0, num_elems_processed_per_iteration);

    bool window_changed = update_window_and_padding(win_input1, input1_access)
                          || update_window_and_padding(win_input2, input2_access)
                          || update_window_and_padding(win, output_access);

    output_access.set_valid_region(win, valid_region);

    Status err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{};
    return std::make_pair(err, win);
}
} // namespace

CLPixelWiseMultiplicationKernel::CLPixelWiseMultiplicationKernel()
    : _input1(nullptr), _input2(nullptr), _output(nullptr)
{
}

void CLPixelWiseMultiplicationKernel::configure(ITensorInfo *input1, ITensorInfo *input2, ITensorInfo *output, float scale,
                                                ConvertPolicy overflow_policy, RoundingPolicy rounding_policy, const ActivationLayerInfo &act_info)
{
    configure(CLKernelLibrary::get().get_compile_context(), input1, input2, output, scale, overflow_policy, rounding_policy, act_info);
}

void CLPixelWiseMultiplicationKernel::configure(const CLCompileContext &compile_context, ITensorInfo *input1, ITensorInfo *input2, ITensorInfo *output, float scale,
                                                ConvertPolicy overflow_policy, RoundingPolicy rounding_policy, const ActivationLayerInfo &act_info)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(input1, input2, output);
    ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input1, input2, output,
                                                  scale, overflow_policy, rounding_policy, act_info));

    // Configure kernel window
    auto win_config = validate_and_configure_window(input1, input2, output);
    ARM_COMPUTE_ERROR_THROW_ON(win_config.first);

    _input1 = input1;
    _input2 = input2;
    _output = output;

    int scale_int = -1;
    // Extract sign, exponent and mantissa
    int   exponent            = 0;
    float normalized_mantissa = std::frexp(scale, &exponent);
    // Use int scaling if factor is equal to 1/2^n for 0 <= n <= 15
    // For such factors frexp returns a mantissa of 0.5 and an exponent e = 1 - n, which lies in the range -14 <= e <= 1
    // (it is zero or negative for every n >= 1, since we deal with 1/2^n)
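    // Example: scale = 1/8 -> frexp() returns mantissa 0.5 and exponent -2, so scale_int = |(-2) - 1| = 3
    // and the "_int" kernel variant can scale by a right shift of 3 instead of a floating-point multiply.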
    if((normalized_mantissa == 0.5f) && (-14 <= exponent) && (exponent <= 1))
    {
        // Store the positive exponent. We know that we compute 1/2^n
        // Additionally we need to subtract 1 to compensate that frexp used a mantissa of 0.5
        scale_int = std::abs(exponent - 1);
    }

    std::string acc_type;
    // Check whether the operation has floating-point inputs
    if(is_data_type_float(input1->data_type()) || is_data_type_float(input2->data_type()))
    {
        scale_int = -1;
        acc_type  = (input1->data_type() == DataType::F32 || input2->data_type() == DataType::F32) ? "float" : "half";
    }
    else
    {
        if(input1->element_size() == 2 || input2->element_size() == 2)
        {
            // Use 32-bit accumulator for 16-bit input
            acc_type = "int";
        }
        else
        {
            // Use 16-bit accumulator for 8-bit input
            acc_type = "ushort";
        }
    }

    const bool is_quantized = is_data_type_quantized(input1->data_type());

    // Set kernel build options
    std::string kernel_name = "pixelwise_mul";
    CLBuildOptions build_opts;
    build_opts.add_option("-DDATA_TYPE_IN1=" + get_cl_type_from_data_type(input1->data_type()));
    build_opts.add_option("-DDATA_TYPE_IN2=" + get_cl_type_from_data_type(input2->data_type()));
    build_opts.add_option("-DDATA_TYPE_OUT=" + get_cl_type_from_data_type(output->data_type()));
    build_opts.add_option("-DVEC_SIZE=" + support::cpp11::to_string(num_elems_processed_per_iteration));
    if(is_quantized && (output->data_type() != DataType::S32))
    {
        const UniformQuantizationInfo iq1_info = input1->quantization_info().uniform();
        const UniformQuantizationInfo iq2_info = input2->quantization_info().uniform();
        const UniformQuantizationInfo oq_info  = output->quantization_info().uniform();

        build_opts.add_option_if(is_data_type_quantized_asymmetric(input1->data_type()),
                                 "-DOFFSET_IN1=" + support::cpp11::to_string(iq1_info.offset));
        build_opts.add_option_if(is_data_type_quantized_asymmetric(input2->data_type()),
                                 "-DOFFSET_IN2=" + support::cpp11::to_string(iq2_info.offset));
        build_opts.add_option_if(is_data_type_quantized_asymmetric(output->data_type()),
                                 "-DOFFSET_OUT=" + support::cpp11::to_string(oq_info.offset));
        build_opts.add_option("-DSCALE_IN1=" + float_to_string_with_full_precision(iq1_info.scale));
        build_opts.add_option("-DSCALE_IN2=" + float_to_string_with_full_precision(iq2_info.scale));
        build_opts.add_option("-DSCALE_OUT=" + float_to_string_with_full_precision(oq_info.scale));
        kernel_name += "_quantized";
    }
    else
    {
        kernel_name += (scale_int >= 0) ? "_int" : "_float";
        build_opts.add_option_if_else(overflow_policy == ConvertPolicy::WRAP || is_data_type_float(output->data_type()), "-DWRAP", "-DSATURATE");
        build_opts.add_option_if_else(rounding_policy == RoundingPolicy::TO_ZERO, "-DROUND=_rtz", "-DROUND=_rte");
        build_opts.add_option("-DACC_DATA_TYPE=" + acc_type);
        if(act_info.enabled())
        {
            build_opts.add_option("-DACTIVATION_TYPE=" + lower_string(string_from_activation_func(act_info.activation())));
            build_opts.add_option("-DA_VAL=" + float_to_string_with_full_precision(act_info.a()));
            build_opts.add_option("-DB_VAL=" + float_to_string_with_full_precision(act_info.b()));
        }
    }

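    // At this point kernel_name is "pixelwise_mul_quantized", "pixelwise_mul_int"
    // (integer data with a power-of-two scale) or "pixelwise_mul_float".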
    // Create kernel
    _kernel = create_kernel(compile_context, kernel_name, build_opts.options());

    // Set scale argument
    unsigned int idx = 3 * num_arguments_per_3D_tensor(); // Skip the inputs and output parameters

    if(scale_int >= 0 && !is_quantized)
    {
        _kernel.setArg(idx++, scale_int);
    }
    else
    {
        _kernel.setArg(idx++, scale);
    }

    ICLKernel::configure_internal(win_config.second);
}

Status CLPixelWiseMultiplicationKernel::validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, float scale,
                                                 ConvertPolicy overflow_policy, RoundingPolicy rounding_policy, const ActivationLayerInfo &act_info)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(input1, input2, output);
    ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input1, input2, output, scale, overflow_policy, rounding_policy, act_info));
    ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(input1->clone().get(), input2->clone().get(), output->clone().get()).first);

    return Status{};
}

void CLPixelWiseMultiplicationKernel::run_op(ITensorPack &tensors, const Window &window, cl::CommandQueue &queue)
{
    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window);

    const auto src_0 = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC_0));
    const auto src_1 = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC_1));
    auto       dst   = utils::cast::polymorphic_downcast<ICLTensor *>(tensors.get_tensor(TensorType::ACL_DST));

    const TensorShape &in_shape1 = src_0->info()->tensor_shape();
    const TensorShape &in_shape2 = src_1->info()->tensor_shape();
    const TensorShape &out_shape = dst->info()->tensor_shape();

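    // Collapse dimension Z and above into a single slice when both inputs have matching
    // (non-broadcast) extents there, so that fewer 3D slices are enqueued below.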
    bool can_collapse = true;
    if(std::min(in_shape1.total_size(), in_shape2.total_size()) > 1)
    {
        can_collapse = (std::min(in_shape1.num_dimensions(), in_shape2.num_dimensions()) > Window::DimZ);
        for(size_t d = Window::DimZ; can_collapse && (d < out_shape.num_dimensions()); ++d)
        {
            can_collapse = (in_shape1[d] == in_shape2[d]);
        }
    }

    bool   has_collapsed = false;
    Window collapsed     = can_collapse ? window.collapse_if_possible(ICLKernel::window(), Window::DimZ, &has_collapsed) : window;

    const TensorShape &in_shape1_collapsed = has_collapsed ? in_shape1.collapsed_from(Window::DimZ) : in_shape1;
    const TensorShape &in_shape2_collapsed = has_collapsed ? in_shape2.collapsed_from(Window::DimZ) : in_shape2;

    Window slice        = collapsed.first_slice_window_3D();
    Window slice_input1 = slice.broadcast_if_dimension_le_one(in_shape1_collapsed);
    Window slice_input2 = slice.broadcast_if_dimension_le_one(in_shape2_collapsed);

    do
    {
        unsigned int idx = 0;
        add_3D_tensor_argument(idx, src_0, slice_input1);
        add_3D_tensor_argument(idx, src_1, slice_input2);
        add_3D_tensor_argument(idx, dst, slice);
        enqueue(queue, *this, slice, lws_hint());

        ARM_COMPUTE_UNUSED(collapsed.slide_window_slice_3D(slice_input1));
        ARM_COMPUTE_UNUSED(collapsed.slide_window_slice_3D(slice_input2));
    }
    while(collapsed.slide_window_slice_3D(slice));
}

BorderSize CLPixelWiseMultiplicationKernel::border_size() const
{
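    // Example: for an output of width 20 with one input broadcast from width 1, replicateSize is 19
    // and the right-hand border below is min(16 - 1, 19) = 15 elements.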
    const unsigned int replicateSize = _output->dimension(0) - std::min(_input1->dimension(0), _input2->dimension(0));
    const unsigned int border        = std::min<unsigned int>(num_elems_processed_per_iteration - 1U, replicateSize);
    return BorderSize{ 0, border, 0, 0 };
}

namespace
{
constexpr unsigned int num_elems_processed_per_iteration_complex = 1;

Status validate_arguments_complex(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info)
{
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input1, 2, DataType::F32);
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input2, 2, DataType::F32);
    ARM_COMPUTE_RETURN_ERROR_ON(act_info.enabled() && !is_data_type_float(input1->data_type()));

    const TensorShape &out_shape = TensorShape::broadcast_shape(input1->tensor_shape(), input2->tensor_shape());

    ARM_COMPUTE_RETURN_ERROR_ON_MSG(out_shape.total_size() == 0, "Inputs are not broadcast compatible");

    // Validate in case of configured output
    if(output->total_size() > 0)
    {
        ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 2, DataType::F32);
        ARM_COMPUTE_RETURN_ERROR_ON(act_info.enabled() && !is_data_type_float(output->data_type()));
        ARM_COMPUTE_RETURN_ERROR_ON_MSG(detail::have_different_dimensions(out_shape, output->tensor_shape(), 0), "Wrong shape for output");
    }

    return Status{};
}

std::pair<Status, Window> validate_and_configure_window_complex(ITensorInfo *input1, ITensorInfo *input2, ITensorInfo *output)
{
    const std::pair<TensorShape, ValidRegion> broadcast_pair = ITensorInfo::broadcast_shape_and_valid_region(*input1, *input2);
    const TensorShape &out_shape    = broadcast_pair.first;
    const ValidRegion &valid_region = broadcast_pair.second;

    // Auto initialize output if not initialized
    const TensorInfo out_info(out_shape, input1->num_channels(), input1->data_type());
    auto_init_if_empty(*output, out_info);

    Window win        = calculate_max_window(valid_region, Steps(num_elems_processed_per_iteration_complex));
    Window win_input1 = win.broadcast_if_dimension_le_one(*input1);
    Window win_input2 = win.broadcast_if_dimension_le_one(*input2);

    AccessWindowHorizontal input1_access(input1, 0, num_elems_processed_per_iteration_complex);
    AccessWindowHorizontal input2_access(input2, 0, num_elems_processed_per_iteration_complex);
    AccessWindowHorizontal output_access(output, 0, num_elems_processed_per_iteration_complex);

    bool window_changed = update_window_and_padding(win_input1, input1_access)
                          || update_window_and_padding(win_input2, input2_access)
                          || update_window_and_padding(win, output_access);

    output_access.set_valid_region(win, valid_region);

    Status err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{};
    return std::make_pair(err, win);
}
} // namespace

CLComplexPixelWiseMultiplicationKernel::CLComplexPixelWiseMultiplicationKernel()
    : _input1(nullptr), _input2(nullptr), _output(nullptr)
{
}

void CLComplexPixelWiseMultiplicationKernel::configure(ITensorInfo *input1, ITensorInfo *input2, ITensorInfo *output, const ActivationLayerInfo &act_info)
{
    configure(CLKernelLibrary::get().get_compile_context(), input1, input2, output, act_info);
}

void CLComplexPixelWiseMultiplicationKernel::configure(const CLCompileContext &compile_context, ITensorInfo *input1, ITensorInfo *input2, ITensorInfo *output, const ActivationLayerInfo &act_info)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(input1, input2, output);
    ARM_COMPUTE_ERROR_THROW_ON(validate_arguments_complex(input1, input2, output, act_info));

    // Configure kernel window
    auto win_config = validate_and_configure_window_complex(input1, input2, output);
    ARM_COMPUTE_ERROR_THROW_ON(win_config.first);

    _input1 = input1;
    _input2 = input2;
    _output = output;

    CLBuildOptions build_opts;
    build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(_output->data_type()));
    if(act_info.enabled())
    {
        build_opts.add_option("-DACTIVATION_TYPE=" + lower_string(string_from_activation_func(act_info.activation())));
        build_opts.add_option("-DA_VAL=" + float_to_string_with_full_precision(act_info.a()));
        build_opts.add_option("-DB_VAL=" + float_to_string_with_full_precision(act_info.b()));
    }

    // Create kernel
    _kernel = create_kernel(compile_context, "pixelwise_mul_complex", build_opts.options());

    ICLKernel::configure_internal(win_config.second);
}

Status CLComplexPixelWiseMultiplicationKernel::validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(input1, input2, output);
    ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments_complex(input1, input2, output, act_info));
    ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window_complex(input1->clone().get(), input2->clone().get(), output->clone().get()).first);

    return Status{};
}

void CLComplexPixelWiseMultiplicationKernel::run_op(ITensorPack &tensors, const Window &window, cl::CommandQueue &queue)
{
    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window);

    const auto src_0 = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC_0));
    const auto src_1 = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC_1));
    auto       dst   = utils::cast::polymorphic_downcast<ICLTensor *>(tensors.get_tensor(TensorType::ACL_DST));

    const TensorShape &in_shape1 = src_0->info()->tensor_shape();
    const TensorShape &in_shape2 = src_1->info()->tensor_shape();
    const TensorShape &out_shape = dst->info()->tensor_shape();

    bool can_collapse = true;
    if(std::min(in_shape1.total_size(), in_shape2.total_size()) > 1)
    {
        can_collapse = (std::min(in_shape1.num_dimensions(), in_shape2.num_dimensions()) > Window::DimZ);
        for(size_t d = Window::DimZ; can_collapse && (d < out_shape.num_dimensions()); ++d)
        {
            can_collapse = (in_shape1[d] == in_shape2[d]);
        }
    }

    bool   has_collapsed = false;
    Window collapsed     = can_collapse ? window.collapse_if_possible(ICLKernel::window(), Window::DimZ, &has_collapsed) : window;

    const TensorShape &in_shape1_collapsed = has_collapsed ? in_shape1.collapsed_from(Window::DimZ) : in_shape1;
    const TensorShape &in_shape2_collapsed = has_collapsed ? in_shape2.collapsed_from(Window::DimZ) : in_shape2;

    Window slice        = collapsed.first_slice_window_3D();
    Window slice_input1 = slice.broadcast_if_dimension_le_one(in_shape1_collapsed);
    Window slice_input2 = slice.broadcast_if_dimension_le_one(in_shape2_collapsed);

    do
    {
        unsigned int idx = 0;
        add_3D_tensor_argument(idx, src_0, slice_input1);
        add_3D_tensor_argument(idx, src_1, slice_input2);
        add_3D_tensor_argument(idx, dst, slice);
        enqueue(queue, *this, slice, lws_hint());

        ARM_COMPUTE_UNUSED(collapsed.slide_window_slice_3D(slice_input1));
        ARM_COMPUTE_UNUSED(collapsed.slide_window_slice_3D(slice_input2));
    }
    while(collapsed.slide_window_slice_3D(slice));
}

BorderSize CLComplexPixelWiseMultiplicationKernel::border_size() const
{
    const unsigned int replicateSize = _output->dimension(0) - std::min(_input1->dimension(0), _input2->dimension(0));
    const unsigned int border        = std::min<unsigned int>(num_elems_processed_per_iteration_complex - 1U, replicateSize);
    return BorderSize{ 0, border, 0, 0 };
}
} // namespace arm_compute
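
A minimal usage sketch (illustrative only, not part of this translation unit): it assumes the kernel header path used in the includes above and a hypothetical helper name, and simply asks the static validate() entry point whether an FP32 multiplication with scale 1/255 would be accepted.

    #include "arm_compute/core/TensorInfo.h"
    #include "arm_compute/core/Types.h"
    #include "src/core/CL/kernels/CLPixelWiseMultiplicationKernel.h"

    // Hypothetical helper: returns true when this FP32 configuration is supported.
    bool fp32_mul_is_supported()
    {
        using namespace arm_compute;

        // Two 16x16 single-channel F32 inputs and a matching output descriptor.
        const TensorInfo src0(TensorShape(16U, 16U), 1, DataType::F32);
        const TensorInfo src1(TensorShape(16U, 16U), 1, DataType::F32);
        const TensorInfo dst(TensorShape(16U, 16U), 1, DataType::F32);

        // Ask the kernel whether the configuration is valid before configuring it.
        const Status status = CLPixelWiseMultiplicationKernel::validate(&src0, &src1, &dst, 1.f / 255.f,
                                                                        ConvertPolicy::SATURATE, RoundingPolicy::TO_NEAREST_UP);
        return status.error_code() == ErrorCode::OK;
    }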