Compute Library 20.05
CLPixelWiseMultiplicationKernel.cpp
/*
 * Copyright (c) 2016-2020 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/core/CL/kernels/CLPixelWiseMultiplicationKernel.h"

#include "arm_compute/core/CL/CLHelpers.h"
#include "arm_compute/core/CL/CLKernelLibrary.h"
#include "arm_compute/core/CL/CLValidate.h"
#include "arm_compute/core/CL/ICLTensor.h"
#include "arm_compute/core/CL/OpenCL.h"
#include "arm_compute/core/TensorInfo.h"
#include "support/StringSupport.h"

namespace arm_compute
{
namespace
{
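// Number of elements processed along the x dimension in each kernel iteration (window step and access-window width).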
constexpr unsigned int num_elems_processed_per_iteration = 16;

Status validate_arguments(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, float scale,
                          ConvertPolicy overflow_policy, RoundingPolicy rounding_policy, const ActivationLayerInfo &act_info)
{
    ARM_COMPUTE_UNUSED(overflow_policy);
    ARM_COMPUTE_UNUSED(rounding_policy);

    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input1, input2, output);
    ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(input1);
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input1,
                                                         1,
                                                         DataType::U8, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::S16,
                                                         DataType::QSYMM16, DataType::F16, DataType::F32);
    ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(input2);
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input2,
                                                         1,
                                                         DataType::U8, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::S16,
                                                         DataType::QSYMM16, DataType::F16, DataType::F32);
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(scale < 0, "Scale cannot be negative.");
    ARM_COMPUTE_RETURN_ERROR_ON(act_info.enabled() && !is_data_type_float(output->data_type()));

    const TensorShape &out_shape = TensorShape::broadcast_shape(input1->tensor_shape(), input2->tensor_shape());

    ARM_COMPUTE_RETURN_ERROR_ON_MSG(out_shape.total_size() == 0, "Inputs are not broadcast compatible");

    // Validate in case of configured output
    if(output->total_size() > 0)
    {
        ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output,
                                                             1,
                                                             DataType::U8, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::S16,
                                                             DataType::QSYMM16, DataType::S32, DataType::F16, DataType::F32);
        ARM_COMPUTE_RETURN_ERROR_ON_MSG(output->data_type() == DataType::U8 && (input1->data_type() != DataType::U8 || input2->data_type() != DataType::U8),
                                        "Output can only be U8 if both inputs are U8");
        ARM_COMPUTE_RETURN_ERROR_ON_MSG(output->data_type() == DataType::QASYMM8 && (input1->data_type() != DataType::QASYMM8 || input2->data_type() != DataType::QASYMM8),
                                        "Output can only be QASYMM8 if both inputs are QASYMM8");
        ARM_COMPUTE_RETURN_ERROR_ON_MSG(output->data_type() == DataType::QASYMM8_SIGNED && (input1->data_type() != DataType::QASYMM8_SIGNED || input2->data_type() != DataType::QASYMM8_SIGNED),
                                        "Output can only be QASYMM8_SIGNED if both inputs are QASYMM8_SIGNED");
        ARM_COMPUTE_RETURN_ERROR_ON_MSG(output->data_type() == DataType::QSYMM16 && (input1->data_type() != DataType::QSYMM16 || input2->data_type() != DataType::QSYMM16),
                                        "Output can only be QSYMM16 if both inputs are QSYMM16");
        ARM_COMPUTE_RETURN_ERROR_ON_MSG(output->data_type() == DataType::S32 && (input1->data_type() != DataType::QSYMM16 || input2->data_type() != DataType::QSYMM16),
                                        "Output can only be S32 if both inputs are QSYMM16");
        ARM_COMPUTE_RETURN_ERROR_ON_MSG(detail::have_different_dimensions(out_shape, output->tensor_shape(), 0), "Wrong shape for output");
    }

    return Status{};
}

std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input1, ITensorInfo *input2, ITensorInfo *output)
{
    const std::pair<TensorShape, ValidRegion> broadcast_pair = ITensorInfo::broadcast_shape_and_valid_region(*input1, *input2);
    const TensorShape &out_shape    = broadcast_pair.first;
    const ValidRegion &valid_region = broadcast_pair.second;

    // Auto initialize output if not initialized
    {
        set_shape_if_empty(*output, out_shape);

        if(input1->data_type() == DataType::S16 || input2->data_type() == DataType::S16)
        {
            set_format_if_unknown(*output, Format::S16);
        }
        else if(input1->data_type() == DataType::F32 || input2->data_type() == DataType::F32)
        {
            set_format_if_unknown(*output, Format::F32);
        }
        else if(input1->data_type() == DataType::QASYMM8)
        {
            set_data_type_if_unknown(*output, DataType::QASYMM8);
        }
        else if(input1->data_type() == DataType::QASYMM8_SIGNED)
        {
            set_data_type_if_unknown(*output, DataType::QASYMM8_SIGNED);
        }
        else if(input1->data_type() == DataType::QSYMM16)
        {
            set_data_type_if_unknown(*output, DataType::QSYMM16);
        }
    }

    Window win        = calculate_max_window(valid_region, Steps(num_elems_processed_per_iteration));
    Window win_input1 = win.broadcast_if_dimension_le_one(*input1);
    Window win_input2 = win.broadcast_if_dimension_le_one(*input2);

    AccessWindowHorizontal input1_access(input1, 0, num_elems_processed_per_iteration);
    AccessWindowHorizontal input2_access(input2, 0, num_elems_processed_per_iteration);
    AccessWindowHorizontal output_access(output, 0, num_elems_processed_per_iteration);

    bool window_changed = update_window_and_padding(win_input1, input1_access)
                          || update_window_and_padding(win_input2, input2_access)
                          || update_window_and_padding(win, output_access);

    output_access.set_valid_region(win, valid_region);

    Status err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{};
    return std::make_pair(err, win);
}
} // namespace

CLPixelWiseMultiplicationKernel::CLPixelWiseMultiplicationKernel()
    : _input1(nullptr), _input2(nullptr), _output(nullptr)
{
}

void CLPixelWiseMultiplicationKernel::configure(const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, float scale,
                                                ConvertPolicy overflow_policy, RoundingPolicy rounding_policy, const ActivationLayerInfo &act_info)
{
    configure(CLKernelLibrary::get().get_compile_context(), input1, input2, output, scale, overflow_policy, rounding_policy, act_info);
}

void CLPixelWiseMultiplicationKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, float scale,
                                                ConvertPolicy overflow_policy, RoundingPolicy rounding_policy, const ActivationLayerInfo &act_info)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(input1, input2, output);
    ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input1->info(), input2->info(), output->info(),
                                                  scale, overflow_policy, rounding_policy, act_info));

    // Configure kernel window
    auto win_config = validate_and_configure_window(input1->info(), input2->info(), output->info());
    ARM_COMPUTE_ERROR_THROW_ON(win_config.first);

    _input1 = input1;
    _input2 = input2;
    _output = output;

    int scale_int = -1;
    // Extract sign, exponent and mantissa
    int   exponent            = 0;
    float normalized_mantissa = std::frexp(scale, &exponent);
    // Use int scaling if the factor is equal to 1/2^n for 0 <= n <= 15
    // frexp returns 0.5 as mantissa, so for scale = 1/2^n the exponent e = 1 - n lies in the range -14 <= e <= 1
    if((normalized_mantissa == 0.5f) && (-14 <= exponent) && (exponent <= 1))
    {
        // Store the positive exponent. We know that we compute 1/2^n
        // Additionally we need to subtract 1 to compensate that frexp used a mantissa of 0.5
        scale_int = std::abs(exponent - 1);
    }
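    // Worked example: for scale = 0.125f = 1/2^3, std::frexp() returns a mantissa of 0.5f with exponent = -2,
    // so scale_int = |(-2) - 1| = 3 and the positive exponent n = 3 is what gets passed to the "_int" kernel
    // variant below instead of the float scale.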

    std::string acc_type;
    // Check if it has float inputs and output
    if(is_data_type_float(input1->info()->data_type()) || is_data_type_float(input2->info()->data_type()))
    {
        scale_int = -1;
        acc_type  = (input1->info()->data_type() == DataType::F32 || input2->info()->data_type() == DataType::F32) ? "float" : "half";
    }
    else
    {
        if(input1->info()->element_size() == 2 || input2->info()->element_size() == 2)
        {
            // Use 32-bit accumulator for 16-bit input
            acc_type = "int";
        }
        else
        {
            // Use 16-bit accumulator for 8-bit input
            acc_type = "ushort";
        }
    }
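    // The 16-bit unsigned accumulator is enough for the 8-bit case reached here (U8 inputs): the largest
    // product is 255 * 255 = 65025, which fits in 16 unsigned bits. Quantized 8-bit inputs are handled by
    // the pixelwise_mul_quantized variant below, which does not consume ACC_DATA_TYPE.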

    const bool is_quantized = is_data_type_quantized(input1->info()->data_type());

    // Set kernel build options
    std::string    kernel_name = "pixelwise_mul";
    CLBuildOptions build_opts;
    build_opts.add_option("-DDATA_TYPE_IN1=" + get_cl_type_from_data_type(input1->info()->data_type()));
    build_opts.add_option("-DDATA_TYPE_IN2=" + get_cl_type_from_data_type(input2->info()->data_type()));
    build_opts.add_option("-DDATA_TYPE_OUT=" + get_cl_type_from_data_type(output->info()->data_type()));
    if(is_quantized && (output->info()->data_type() != DataType::S32))
    {
        const UniformQuantizationInfo iq1_info = input1->info()->quantization_info().uniform();
        const UniformQuantizationInfo iq2_info = input2->info()->quantization_info().uniform();
        const UniformQuantizationInfo oq_info  = output->info()->quantization_info().uniform();

        build_opts.add_option_if(is_data_type_quantized_asymmetric(input1->info()->data_type()),
                                 "-DOFFSET_IN1=" + support::cpp11::to_string(iq1_info.offset));
        build_opts.add_option_if(is_data_type_quantized_asymmetric(input2->info()->data_type()),
                                 "-DOFFSET_IN2=" + support::cpp11::to_string(iq2_info.offset));
        build_opts.add_option_if(is_data_type_quantized_asymmetric(output->info()->data_type()),
                                 "-DOFFSET_OUT=" + support::cpp11::to_string(oq_info.offset));
        build_opts.add_option("-DSCALE_IN1=" + float_to_string_with_full_precision(iq1_info.scale));
        build_opts.add_option("-DSCALE_IN2=" + float_to_string_with_full_precision(iq2_info.scale));
        build_opts.add_option("-DSCALE_OUT=" + float_to_string_with_full_precision(oq_info.scale));
        kernel_name += "_quantized";
    }
    else
    {
        kernel_name += (scale_int >= 0) ? "_int" : "_float";
        build_opts.add_option_if_else(overflow_policy == ConvertPolicy::WRAP || is_data_type_float(output->info()->data_type()), "-DWRAP", "-DSATURATE");
        build_opts.add_option_if_else(rounding_policy == RoundingPolicy::TO_ZERO, "-DROUND=_rtz", "-DROUND=_rte");
        build_opts.add_option("-DACC_DATA_TYPE=" + acc_type);
        if(act_info.enabled())
        {
            build_opts.add_option("-DACTIVATION_TYPE=" + lower_string(string_from_activation_func(act_info.activation())));
            build_opts.add_option("-DA_VAL=" + float_to_string_with_full_precision(act_info.a()));
            build_opts.add_option("-DB_VAL=" + float_to_string_with_full_precision(act_info.b()));
        }
    }
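    // Example of the selection above: two F32 inputs force scale_int = -1, so the "pixelwise_mul_float" variant
    // is built with -DWRAP (float outputs always use the wrapping path) and, unless TO_ZERO rounding was
    // requested, -DROUND=_rte.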

    // Create kernel
    _kernel = create_kernel(compile_context, kernel_name, build_opts.options());

    // Set scale argument
    unsigned int idx = 3 * num_arguments_per_3D_tensor(); // Skip the inputs and output parameters

    if(scale_int >= 0 && !is_quantized)
    {
        _kernel.setArg(idx++, scale_int);
    }
    else
    {
        _kernel.setArg(idx++, scale);
    }

    ICLKernel::configure_internal(win_config.second);
}

Status CLPixelWiseMultiplicationKernel::validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, float scale,
                                                 ConvertPolicy overflow_policy, RoundingPolicy rounding_policy, const ActivationLayerInfo &act_info)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(input1, input2, output);
    ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input1, input2, output, scale, overflow_policy, rounding_policy, act_info));
    ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(input1->clone().get(), input2->clone().get(), output->clone().get()).first);

    return Status{};
}

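// Typical call sequence (sketch, not part of this file): a runtime function such as CLPixelWiseMultiplication
// first calls CLPixelWiseMultiplicationKernel::validate() on the tensor infos, then configure() on the CL
// tensors, and finally has CLScheduler::get().enqueue() invoke run() for each workload.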
void CLPixelWiseMultiplicationKernel::run(const Window &window, cl::CommandQueue &queue)
{
    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window);

    const TensorShape &in_shape1 = _input1->info()->tensor_shape();
    const TensorShape &in_shape2 = _input2->info()->tensor_shape();
    const TensorShape &out_shape = _output->info()->tensor_shape();

    bool can_collapse = true;
    if(std::min(in_shape1.total_size(), in_shape2.total_size()) > 1)
    {
        can_collapse = (std::min(in_shape1.num_dimensions(), in_shape2.num_dimensions()) > Window::DimZ);
        for(size_t d = Window::DimZ; can_collapse && (d < out_shape.num_dimensions()); ++d)
        {
            can_collapse = (in_shape1[d] == in_shape2[d]);
        }
    }
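    // Dimensions at or above Window::DimZ can only be collapsed when both inputs have the same size there,
    // i.e. when no broadcasting happens in any of those dimensions.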

    bool   has_collapsed = false;
    Window collapsed     = can_collapse ? window.collapse_if_possible(ICLKernel::window(), Window::DimZ, &has_collapsed) : window;

    const TensorShape &in_shape1_collapsed = has_collapsed ? in_shape1.collapsed_from(Window::DimZ) : in_shape1;
    const TensorShape &in_shape2_collapsed = has_collapsed ? in_shape2.collapsed_from(Window::DimZ) : in_shape2;

    Window slice        = collapsed.first_slice_window_3D();
    Window slice_input1 = slice.broadcast_if_dimension_le_one(in_shape1_collapsed);
    Window slice_input2 = slice.broadcast_if_dimension_le_one(in_shape2_collapsed);

    do
    {
        unsigned int idx = 0;
        add_3D_tensor_argument(idx, _input1, slice_input1);
        add_3D_tensor_argument(idx, _input2, slice_input2);
        add_3D_tensor_argument(idx, _output, slice);
        enqueue(queue, *this, slice, lws_hint());

        ARM_COMPUTE_UNUSED(collapsed.slide_window_slice_3D(slice_input1));
        ARM_COMPUTE_UNUSED(collapsed.slide_window_slice_3D(slice_input2));
    }
    while(collapsed.slide_window_slice_3D(slice));
}

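// border_size() reports the extra right-hand border needed when one input is broadcast along x: replicateSize
// is how much wider the output is than the narrower input, and up to one vector width minus one element of
// right padding is requested to cover it.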
BorderSize CLPixelWiseMultiplicationKernel::border_size() const
{
    const unsigned int replicateSize = _output->info()->dimension(0) - std::min(_input1->info()->dimension(0), _input2->info()->dimension(0));
    const unsigned int border        = std::min<unsigned int>(num_elems_processed_per_iteration - 1U, replicateSize);
    return BorderSize{ 0, border, 0, 0 };
}

namespace
{
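// The complex kernel works on 2-channel F32 tensors (real and imaginary parts) and processes one complex
// element per iteration.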
constexpr unsigned int num_elems_processed_per_iteration_complex = 1;

Status validate_arguments_complex(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info)
{
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input1, 2, DataType::F32);
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input2, 2, DataType::F32);

    const TensorShape &out_shape = TensorShape::broadcast_shape(input1->tensor_shape(), input2->tensor_shape());

    ARM_COMPUTE_RETURN_ERROR_ON_MSG(out_shape.total_size() == 0, "Inputs are not broadcast compatible");
    ARM_COMPUTE_RETURN_ERROR_ON(act_info.enabled() && !is_data_type_float(output->data_type()));

    // Validate in case of configured output
    if(output->total_size() > 0)
    {
        ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 2, DataType::F32);
        ARM_COMPUTE_RETURN_ERROR_ON_MSG(detail::have_different_dimensions(out_shape, output->tensor_shape(), 0), "Wrong shape for output");
    }

    return Status{};
}

std::pair<Status, Window> validate_and_configure_window_complex(ITensorInfo *input1, ITensorInfo *input2, ITensorInfo *output)
{
    const std::pair<TensorShape, ValidRegion> broadcast_pair = ITensorInfo::broadcast_shape_and_valid_region(*input1, *input2);
    const TensorShape &out_shape    = broadcast_pair.first;
    const ValidRegion &valid_region = broadcast_pair.second;

    // Auto initialize output if not initialized
    const TensorInfo out_info(out_shape, input1->num_channels(), input1->data_type());
    auto_init_if_empty(*output, out_info);

    Window win        = calculate_max_window(valid_region, Steps(num_elems_processed_per_iteration_complex));
    Window win_input1 = win.broadcast_if_dimension_le_one(*input1);
    Window win_input2 = win.broadcast_if_dimension_le_one(*input2);

    AccessWindowHorizontal input1_access(input1, 0, num_elems_processed_per_iteration_complex);
    AccessWindowHorizontal input2_access(input2, 0, num_elems_processed_per_iteration_complex);
    AccessWindowHorizontal output_access(output, 0, num_elems_processed_per_iteration_complex);

    bool window_changed = update_window_and_padding(win_input1, input1_access)
                          || update_window_and_padding(win_input2, input2_access)
                          || update_window_and_padding(win, output_access);

    output_access.set_valid_region(win, valid_region);

    Status err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{};
    return std::make_pair(err, win);
}
} // namespace

CLComplexPixelWiseMultiplicationKernel::CLComplexPixelWiseMultiplicationKernel()
    : _input1(nullptr), _input2(nullptr), _output(nullptr)
{
}

void CLComplexPixelWiseMultiplicationKernel::configure(const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, const ActivationLayerInfo &act_info)
{
    configure(CLKernelLibrary::get().get_compile_context(), input1, input2, output, act_info);
}

void CLComplexPixelWiseMultiplicationKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, const ActivationLayerInfo &act_info)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(input1, input2, output);
    ARM_COMPUTE_ERROR_THROW_ON(validate_arguments_complex(input1->info(), input2->info(), output->info(), act_info));

    // Configure kernel window
    auto win_config = validate_and_configure_window_complex(input1->info(), input2->info(), output->info());
    ARM_COMPUTE_ERROR_THROW_ON(win_config.first);

    _input1 = input1;
    _input2 = input2;
    _output = output;

    CLBuildOptions build_opts;
    if(act_info.enabled())
    {
        build_opts.add_option("-DACTIVATION_TYPE=" + lower_string(string_from_activation_func(act_info.activation())));
        build_opts.add_option("-DA_VAL=" + float_to_string_with_full_precision(act_info.a()));
        build_opts.add_option("-DB_VAL=" + float_to_string_with_full_precision(act_info.b()));
    }

    // Create kernel
    _kernel = create_kernel(compile_context, "pixelwise_mul_complex", build_opts.options());

    ICLKernel::configure_internal(win_config.second);
}

Status CLComplexPixelWiseMultiplicationKernel::validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(input1, input2, output);
    ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments_complex(input1, input2, output, act_info));
    ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window_complex(input1->clone().get(), input2->clone().get(), output->clone().get()).first);

    return Status{};
}

void CLComplexPixelWiseMultiplicationKernel::run(const Window &window, cl::CommandQueue &queue)
{
    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window);

    const TensorShape &in_shape1 = _input1->info()->tensor_shape();
    const TensorShape &in_shape2 = _input2->info()->tensor_shape();
    const TensorShape &out_shape = _output->info()->tensor_shape();

    bool can_collapse = true;
    if(std::min(in_shape1.total_size(), in_shape2.total_size()) > 1)
    {
        can_collapse = (std::min(in_shape1.num_dimensions(), in_shape2.num_dimensions()) > Window::DimZ);
        for(size_t d = Window::DimZ; can_collapse && (d < out_shape.num_dimensions()); ++d)
        {
            can_collapse = (in_shape1[d] == in_shape2[d]);
        }
    }

    bool   has_collapsed = false;
    Window collapsed     = can_collapse ? window.collapse_if_possible(ICLKernel::window(), Window::DimZ, &has_collapsed) : window;

    const TensorShape &in_shape1_collapsed = has_collapsed ? in_shape1.collapsed_from(Window::DimZ) : in_shape1;
    const TensorShape &in_shape2_collapsed = has_collapsed ? in_shape2.collapsed_from(Window::DimZ) : in_shape2;

    Window slice        = collapsed.first_slice_window_3D();
    Window slice_input1 = slice.broadcast_if_dimension_le_one(in_shape1_collapsed);
    Window slice_input2 = slice.broadcast_if_dimension_le_one(in_shape2_collapsed);

    do
    {
        unsigned int idx = 0;
        add_3D_tensor_argument(idx, _input1, slice_input1);
        add_3D_tensor_argument(idx, _input2, slice_input2);
        add_3D_tensor_argument(idx, _output, slice);
        enqueue(queue, *this, slice, lws_hint());

        ARM_COMPUTE_UNUSED(collapsed.slide_window_slice_3D(slice_input1));
        ARM_COMPUTE_UNUSED(collapsed.slide_window_slice_3D(slice_input2));
    }
    while(collapsed.slide_window_slice_3D(slice));
}

BorderSize CLComplexPixelWiseMultiplicationKernel::border_size() const
{
    const unsigned int replicateSize = _output->info()->dimension(0) - std::min(_input1->info()->dimension(0), _input2->info()->dimension(0));
    const unsigned int border        = std::min<unsigned int>(num_elems_processed_per_iteration_complex - 1U, replicateSize);
    return BorderSize{ 0, border, 0, 0 };
}
} // namespace arm_compute