CLPixelWiseMultiplicationKernel.cpp (Compute Library 20.02.1)
/*
 * Copyright (c) 2016-2020 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/core/CL/kernels/CLPixelWiseMultiplicationKernel.h"

#include "arm_compute/core/CL/CLHelpers.h"
#include "arm_compute/core/CL/CLKernelLibrary.h"
#include "arm_compute/core/CL/CLValidate.h"
#include "arm_compute/core/CL/ICLTensor.h"
#include "arm_compute/core/CL/OpenCL.h"
#include "arm_compute/core/Error.h"
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Window.h"

#include <cmath>
#include <cstdlib>
#include <set>
#include <string>
namespace arm_compute
{
namespace
{
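// Each work item of the OpenCL kernel processes 16 elements along the X dimension; this constant
// also sizes the horizontal access windows below and therefore the padding the kernel may require.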
constexpr unsigned int num_elems_processed_per_iteration = 16;

Status validate_arguments(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, float scale,
                          ConvertPolicy overflow_policy, RoundingPolicy rounding_policy)
{
    ARM_COMPUTE_UNUSED(overflow_policy);
    ARM_COMPUTE_UNUSED(rounding_policy);

    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input1, input2, output);
    ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(input1);
    ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(input2);
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input1, 1, DataType::U8, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::S16, DataType::QSYMM16, DataType::F16, DataType::F32);
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input2, 1, DataType::U8, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::S16, DataType::QSYMM16, DataType::F16, DataType::F32);
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(scale < 0, "Scale cannot be negative.");

    const TensorShape &out_shape = TensorShape::broadcast_shape(input1->tensor_shape(), input2->tensor_shape());

    ARM_COMPUTE_RETURN_ERROR_ON_MSG(out_shape.total_size() == 0, "Inputs are not broadcast compatible");

    // Validate in case of configured output
    if(output->total_size() > 0)
    {
        ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::U8, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::S16, DataType::QSYMM16, DataType::F16, DataType::F32);
        ARM_COMPUTE_RETURN_ERROR_ON_MSG(output->data_type() == DataType::U8 && (input1->data_type() != DataType::U8 || input2->data_type() != DataType::U8),
                                        "Output can only be U8 if both inputs are U8");
        ARM_COMPUTE_RETURN_ERROR_ON_MSG(output->data_type() == DataType::QASYMM8 && (input1->data_type() != DataType::QASYMM8 || input2->data_type() != DataType::QASYMM8),
                                        "Output can only be QASYMM8 if both inputs are QASYMM8");
        ARM_COMPUTE_RETURN_ERROR_ON_MSG(output->data_type() == DataType::QASYMM8_SIGNED && (input1->data_type() != DataType::QASYMM8_SIGNED || input2->data_type() != DataType::QASYMM8_SIGNED),
                                        "Output can only be QASYMM8_SIGNED if both inputs are QASYMM8_SIGNED");
        ARM_COMPUTE_RETURN_ERROR_ON_MSG(output->data_type() == DataType::QSYMM16 && (input1->data_type() != DataType::QSYMM16 || input2->data_type() != DataType::QSYMM16),
                                        "Output can only be QSYMM16 if both inputs are QSYMM16");
        ARM_COMPUTE_RETURN_ERROR_ON_MSG(detail::have_different_dimensions(out_shape, output->tensor_shape(), 0), "Wrong shape for output");
    }

    return Status{};
}

std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input1, ITensorInfo *input2, ITensorInfo *output)
{
    const std::pair<TensorShape, ValidRegion> broadcast_pair = ITensorInfo::broadcast_shape_and_valid_region(*input1, *input2);
    const TensorShape &out_shape    = broadcast_pair.first;
    const ValidRegion &valid_region = broadcast_pair.second;

    // Auto initialize output if not initialized
    {
        set_shape_if_empty(*output, out_shape);

        if(input1->data_type() == DataType::S16 || input2->data_type() == DataType::S16)
        {
            set_format_if_unknown(*output, Format::S16);
        }
        else if(input1->data_type() == DataType::F32 || input2->data_type() == DataType::F32)
        {
            set_format_if_unknown(*output, Format::F32);
        }
        else if(input1->data_type() == DataType::QASYMM8)
        {
            set_data_type_if_unknown(*output, DataType::QASYMM8);
        }
        else if(input1->data_type() == DataType::QASYMM8_SIGNED)
        {
            set_data_type_if_unknown(*output, DataType::QASYMM8_SIGNED);
        }
        else if(input1->data_type() == DataType::QSYMM16)
        {
            set_data_type_if_unknown(*output, DataType::QSYMM16);
        }
    }

    Window win = calculate_max_window(valid_region, Steps(num_elems_processed_per_iteration));
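    // broadcast_if_dimension_le_one clamps the window in dimensions where the tensor has size 1,
    // so the single available input element is reused for every output element along that dimension.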
    Window win_input1 = win.broadcast_if_dimension_le_one(*input1);
    Window win_input2 = win.broadcast_if_dimension_le_one(*input2);

    AccessWindowHorizontal input1_access(input1, 0, num_elems_processed_per_iteration);
    AccessWindowHorizontal input2_access(input2, 0, num_elems_processed_per_iteration);
    AccessWindowHorizontal output_access(output, 0, num_elems_processed_per_iteration);

    bool window_changed = update_window_and_padding(win_input1, input1_access)
                          || update_window_and_padding(win_input2, input2_access)
                          || update_window_and_padding(win, output_access);

    output_access.set_valid_region(win, valid_region);

    Status err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{};
    return std::make_pair(err, win);
}
} // namespace

CLPixelWiseMultiplicationKernel::CLPixelWiseMultiplicationKernel()
    : _input1(nullptr), _input2(nullptr), _output(nullptr)
{
}

void CLPixelWiseMultiplicationKernel::configure(const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, float scale,
                                                ConvertPolicy overflow_policy, RoundingPolicy rounding_policy)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(input1, input2, output);
    ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input1->info(), input2->info(), output->info(),
                                                  scale, overflow_policy, rounding_policy));

    // Configure kernel window
    auto win_config = validate_and_configure_window(input1->info(), input2->info(), output->info());
    ARM_COMPUTE_ERROR_THROW_ON(win_config.first);

    _input1 = input1;
    _input2 = input2;
    _output = output;

    int scale_int = -1;
    // Extract sign, exponent and mantissa
    int   exponent            = 0;
    float normalized_mantissa = std::frexp(scale, &exponent);
    // Use int scaling if factor is equal to 1/2^n for 0 <= n <= 15
    // frexp returns 0.5 as mantissa, which means that the exponent will be in the range -14 <= e <= 1
    // Moreover, it will mostly be negative as we deal with 1/2^n
    if((normalized_mantissa == 0.5f) && (-14 <= exponent) && (exponent <= 1))
    {
        // Store the positive exponent. We know that we compute 1/2^n
        // Additionally we need to subtract 1 to compensate that frexp used a mantissa of 0.5
        scale_int = std::abs(exponent - 1);
    }
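    // Example: for scale = 1/8, std::frexp returns mantissa 0.5 and exponent -2, so
    // scale_int = std::abs(-2 - 1) = 3, i.e. the power-of-two divisor 2^3. The non-quantized
    // "_int" kernel variant then receives this exponent instead of the float scale (see setArg below).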

    std::string compute_type;
    // Check if it has float inputs and output
    if(is_data_type_float(input1->info()->data_type()) || is_data_type_float(input2->info()->data_type()))
    {
        scale_int    = -1;
        compute_type = (input1->info()->data_type() == DataType::F32 || input2->info()->data_type() == DataType::F32) ? "float" : "half";
    }
    else
    {
        if(input1->info()->data_type() == DataType::S16 || input2->info()->data_type() == DataType::S16)
        {
            compute_type = "int";
        }
        else
        {
            compute_type = "ushort";
        }
    }
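    // Note: the intermediate (compute) type is wider than the inputs so the raw product cannot
    // overflow before scaling: S16 x S16 is accumulated in "int", while U8 x U8 fits in "ushort".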

    const bool is_quantized = is_data_type_quantized(input1->info()->data_type());

    // Set kernel build options
    std::string    kernel_name = "pixelwise_mul";
    CLBuildOptions build_opts;
    build_opts.add_option("-DDATA_TYPE_IN1=" + get_cl_type_from_data_type(input1->info()->data_type()));
    build_opts.add_option("-DDATA_TYPE_IN2=" + get_cl_type_from_data_type(input2->info()->data_type()));
    build_opts.add_option("-DDATA_TYPE_OUT=" + get_cl_type_from_data_type(output->info()->data_type()));
    build_opts.add_option("-DVEC_SIZE=" + support::cpp11::to_string(num_elems_processed_per_iteration));
    if(is_quantized)
    {
        const UniformQuantizationInfo iq1_info = input1->info()->quantization_info().uniform();
        const UniformQuantizationInfo iq2_info = input2->info()->quantization_info().uniform();
        const UniformQuantizationInfo oq_info  = output->info()->quantization_info().uniform();

        build_opts.add_option_if(is_data_type_quantized_asymmetric(input1->info()->data_type()),
                                 "-DOFFSET_IN1=" + support::cpp11::to_string(iq1_info.offset));
        build_opts.add_option_if(is_data_type_quantized_asymmetric(input2->info()->data_type()),
                                 "-DOFFSET_IN2=" + support::cpp11::to_string(iq2_info.offset));
        build_opts.add_option_if(is_data_type_quantized_asymmetric(output->info()->data_type()),
                                 "-DOFFSET_OUT=" + support::cpp11::to_string(oq_info.offset));
        build_opts.add_option("-DSCALE_IN1=" + float_to_string_with_full_precision(iq1_info.scale));
        build_opts.add_option("-DSCALE_IN2=" + float_to_string_with_full_precision(iq2_info.scale));
        build_opts.add_option("-DSCALE_OUT=" + float_to_string_with_full_precision(oq_info.scale));
        kernel_name += "_quantized";
    }
    else
    {
        kernel_name += (scale_int >= 0) ? "_int" : "_float";
        build_opts.add_option_if_else(overflow_policy == ConvertPolicy::WRAP || is_data_type_float(output->info()->data_type()), "-DWRAP", "-DSATURATE");
        build_opts.add_option_if_else(rounding_policy == RoundingPolicy::TO_ZERO, "-DROUND=_rtz", "-DROUND=_rte");
        build_opts.add_option("-DDATA_TYPE_RES=" + compute_type);
    }
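    // The -DROUND value is presumably appended to the OpenCL convert_<type> call in the .cl source:
    // _rtz rounds toward zero and _rte rounds to nearest even. WRAP/SATURATE likewise select whether
    // the conversion to the output type wraps or saturates on overflow.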

    // Create kernel
    _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name, build_opts.options()));

    // Set scale argument
    unsigned int idx = 3 * num_arguments_per_3D_tensor(); // Skip the inputs and output parameters

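    // The integer exponent is only meaningful for the non-quantized "_int" kernel variant;
    // quantized kernels always take the float scale.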
    if(scale_int >= 0 && !is_quantized)
    {
        _kernel.setArg(idx++, scale_int);
    }
    else
    {
        _kernel.setArg(idx++, scale);
    }

    ICLKernel::configure_internal(win_config.second);
}

Status CLPixelWiseMultiplicationKernel::validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, float scale,
                                                 ConvertPolicy overflow_policy, RoundingPolicy rounding_policy)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(input1, input2, output);
    ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input1, input2, output, scale, overflow_policy, rounding_policy));
    ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(input1->clone().get(), input2->clone().get(), output->clone().get()).first);

    return Status{};
}

void CLPixelWiseMultiplicationKernel::run(const Window &window, cl::CommandQueue &queue)
{
    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window);

    const TensorShape &in_shape1 = _input1->info()->tensor_shape();
    const TensorShape &in_shape2 = _input2->info()->tensor_shape();
    const TensorShape &out_shape = _output->info()->tensor_shape();

    bool can_collapse = true;
    if(std::min(in_shape1.total_size(), in_shape2.total_size()) > 1)
    {
        can_collapse = (std::min(in_shape1.num_dimensions(), in_shape2.num_dimensions()) > Window::DimZ);
        for(size_t d = Window::DimZ; can_collapse && (d < out_shape.num_dimensions()); ++d)
        {
            can_collapse = (in_shape1[d] == in_shape2[d]);
        }
    }
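    // Dimensions from Z upwards can only be collapsed into a single dimension when both inputs
    // match there (i.e. no broadcasting above Z); collapsing reduces the number of slices enqueued below.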

    bool   has_collapsed = false;
    Window collapsed     = can_collapse ? window.collapse_if_possible(ICLKernel::window(), Window::DimZ, &has_collapsed) : window;

    const TensorShape &in_shape1_collapsed = has_collapsed ? in_shape1.collapsed_from(Window::DimZ) : in_shape1;
    const TensorShape &in_shape2_collapsed = has_collapsed ? in_shape2.collapsed_from(Window::DimZ) : in_shape2;

    Window slice        = collapsed.first_slice_window_3D();
    Window slice_input1 = slice.broadcast_if_dimension_le_one(in_shape1_collapsed);
    Window slice_input2 = slice.broadcast_if_dimension_le_one(in_shape2_collapsed);

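    // One kernel launch is enqueued per 3D slice of the (possibly collapsed) window; the input
    // slices are slid in lock-step with the output slice, keeping broadcast dimensions pinned to
    // their single element.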
    do
    {
        unsigned int idx = 0;
        add_3D_tensor_argument(idx, _input1, slice_input1);
        add_3D_tensor_argument(idx, _input2, slice_input2);
        add_3D_tensor_argument(idx, _output, slice);
        enqueue(queue, *this, slice, lws_hint());

        ARM_COMPUTE_UNUSED(collapsed.slide_window_slice_3D(slice_input1));
        ARM_COMPUTE_UNUSED(collapsed.slide_window_slice_3D(slice_input2));
    }
    while(collapsed.slide_window_slice_3D(slice));
}

BorderSize CLPixelWiseMultiplicationKernel::border_size() const
{
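    // When one input is broadcast along X (its dimension 0 is smaller than the output's), the kernel
    // may read up to a full vector beyond that input's end, so a right-hand border of up to
    // num_elems_processed_per_iteration - 1 elements is requested (BorderSize is top, right, bottom, left).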
    const unsigned int replicateSize = _output->info()->dimension(0) - std::min(_input1->info()->dimension(0), _input2->info()->dimension(0));
    const unsigned int border        = std::min<unsigned int>(num_elems_processed_per_iteration - 1U, replicateSize);
    return BorderSize{ 0, border, 0, 0 };
}

namespace
{
constexpr unsigned int num_elems_processed_per_iteration_complex = 1;

Status validate_arguments_complex(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output)
{
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input1, 2, DataType::F32);
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input2, 2, DataType::F32);

    const TensorShape &out_shape = TensorShape::broadcast_shape(input1->tensor_shape(), input2->tensor_shape());

    ARM_COMPUTE_RETURN_ERROR_ON_MSG(out_shape.total_size() == 0, "Inputs are not broadcast compatible");

    // Validate in case of configured output
    if(output->total_size() > 0)
    {
        ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 2, DataType::F32);
        ARM_COMPUTE_RETURN_ERROR_ON_MSG(detail::have_different_dimensions(out_shape, output->tensor_shape(), 0), "Wrong shape for output");
    }

    return Status{};
}

std::pair<Status, Window> validate_and_configure_window_complex(ITensorInfo *input1, ITensorInfo *input2, ITensorInfo *output)
{
    const std::pair<TensorShape, ValidRegion> broadcast_pair = ITensorInfo::broadcast_shape_and_valid_region(*input1, *input2);
    const TensorShape &out_shape    = broadcast_pair.first;
    const ValidRegion &valid_region = broadcast_pair.second;

    // Auto initialize output if not initialized
    const TensorInfo out_info(out_shape, input1->num_channels(), input1->data_type());
    auto_init_if_empty(*output, out_info);

    Window win        = calculate_max_window(valid_region, Steps(num_elems_processed_per_iteration_complex));
    Window win_input1 = win.broadcast_if_dimension_le_one(*input1);
    Window win_input2 = win.broadcast_if_dimension_le_one(*input2);

    AccessWindowHorizontal input1_access(input1, 0, num_elems_processed_per_iteration_complex);
    AccessWindowHorizontal input2_access(input2, 0, num_elems_processed_per_iteration_complex);
    AccessWindowHorizontal output_access(output, 0, num_elems_processed_per_iteration_complex);

    bool window_changed = update_window_and_padding(win_input1, input1_access)
                          || update_window_and_padding(win_input2, input2_access)
                          || update_window_and_padding(win, output_access);

    output_access.set_valid_region(win, valid_region);

    Status err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{};
    return std::make_pair(err, win);
}
} // namespace

CLComplexPixelWiseMultiplicationKernel::CLComplexPixelWiseMultiplicationKernel()
    : _input1(nullptr), _input2(nullptr), _output(nullptr)
{
}

void CLComplexPixelWiseMultiplicationKernel::configure(const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(input1, input2, output);
    ARM_COMPUTE_ERROR_THROW_ON(validate_arguments_complex(input1->info(), input2->info(), output->info()));

    // Configure kernel window
    auto win_config = validate_and_configure_window_complex(input1->info(), input2->info(), output->info());
    ARM_COMPUTE_ERROR_THROW_ON(win_config.first);

    _input1 = input1;
    _input2 = input2;
    _output = output;

    // Create kernel
    _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("pixelwise_mul_complex"));

    ICLKernel::configure_internal(win_config.second);
}

Status CLComplexPixelWiseMultiplicationKernel::validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(input1, input2, output);
    ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments_complex(input1, input2, output));
    ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window_complex(input1->clone().get(), input2->clone().get(), output->clone().get()).first);

    return Status{};
}

void CLComplexPixelWiseMultiplicationKernel::run(const Window &window, cl::CommandQueue &queue)
{
    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window);

    const TensorShape &in_shape1 = _input1->info()->tensor_shape();
    const TensorShape &in_shape2 = _input2->info()->tensor_shape();
    const TensorShape &out_shape = _output->info()->tensor_shape();

    bool can_collapse = true;
    if(std::min(in_shape1.total_size(), in_shape2.total_size()) > 1)
    {
        can_collapse = (std::min(in_shape1.num_dimensions(), in_shape2.num_dimensions()) > Window::DimZ);
        for(size_t d = Window::DimZ; can_collapse && (d < out_shape.num_dimensions()); ++d)
        {
            can_collapse = (in_shape1[d] == in_shape2[d]);
        }
    }

    bool   has_collapsed = false;
    Window collapsed     = can_collapse ? window.collapse_if_possible(ICLKernel::window(), Window::DimZ, &has_collapsed) : window;

    const TensorShape &in_shape1_collapsed = has_collapsed ? in_shape1.collapsed_from(Window::DimZ) : in_shape1;
    const TensorShape &in_shape2_collapsed = has_collapsed ? in_shape2.collapsed_from(Window::DimZ) : in_shape2;

    Window slice        = collapsed.first_slice_window_3D();
    Window slice_input1 = slice.broadcast_if_dimension_le_one(in_shape1_collapsed);
    Window slice_input2 = slice.broadcast_if_dimension_le_one(in_shape2_collapsed);

    do
    {
        unsigned int idx = 0;
        add_3D_tensor_argument(idx, _input1, slice_input1);
        add_3D_tensor_argument(idx, _input2, slice_input2);
        add_3D_tensor_argument(idx, _output, slice);
        enqueue(queue, *this, slice, lws_hint());

        ARM_COMPUTE_UNUSED(collapsed.slide_window_slice_3D(slice_input1));
        ARM_COMPUTE_UNUSED(collapsed.slide_window_slice_3D(slice_input2));
    }
    while(collapsed.slide_window_slice_3D(slice));
}

BorderSize CLComplexPixelWiseMultiplicationKernel::border_size() const
{
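    // The complex kernel processes a single (2-channel) element per iteration, so
    // num_elems_processed_per_iteration_complex - 1U is 0 and no border is ever requested.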
    const unsigned int replicateSize = _output->info()->dimension(0) - std::min(_input1->info()->dimension(0), _input2->info()->dimension(0));
    const unsigned int border        = std::min<unsigned int>(num_elems_processed_per_iteration_complex - 1U, replicateSize);
    return BorderSize{ 0, border, 0, 0 };
}
} // namespace arm_compute