Compute Library 21.08
ClFullyConnected.cpp
/*
 * Copyright (c) 2017-2021 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "src/runtime/gpu/cl/operators/ClFullyConnected.h"

#include "arm_compute/core/Validate.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "arm_compute/core/utils/quantization/AsymmHelpers.h"

#include "src/core/helpers/MemoryHelpers.h"
#include "src/runtime/gpu/cl/operators/ClConvertFullyConnectedWeights.h"
#include "src/runtime/gpu/cl/operators/ClFlatten.h"
#include "src/runtime/gpu/cl/operators/ClGemm.h"
#include "src/runtime/gpu/cl/operators/ClGemmLowpMatrixMultiplyCore.h"
#include "src/runtime/gpu/cl/operators/ClTranspose.h"
#include "src/runtime/gpu/cl/utils/ClAuxTensorHandler.h"

#include "support/Cast.h"

#include <algorithm>

namespace arm_compute
{
namespace opencl
{
using namespace arm_compute::experimental;
using namespace arm_compute::misc::shape_calculator;

namespace
{
Status construct_gemmlowp_output_stage(const ITensorInfo &src, const ITensorInfo &weights, const ITensorInfo &dst,
                                       GEMMLowpOutputStageInfo &gemmlowp_output_stage, ActivationLayerInfo activation_info)
{
    gemmlowp_output_stage.type                = GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT;
    gemmlowp_output_stage.gemmlowp_offset     = 0;
    gemmlowp_output_stage.gemmlowp_multiplier = 0;
    gemmlowp_output_stage.gemmlowp_shift      = 0;

    const auto data_type = src.data_type();

    // Configure output stage for quantized case
    if(is_data_type_quantized_asymmetric(data_type))
    {
        const QuantizationInfo        oq_info = dst.quantization_info();
        const UniformQuantizationInfo iq_unif = src.quantization_info().uniform();
        const UniformQuantizationInfo wq_unif = weights.quantization_info().uniform();
        const UniformQuantizationInfo oq_unif = oq_info.uniform();

        const auto output_quant_info = (dst.total_size() == 0) ? iq_unif : oq_unif;

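        // The three float scales fold into one effective multiplier
        //   M = (scale_src * scale_weights) / scale_dst,
        // which calculate_quantized_multiplier() decomposes into an integer multiplier and a
        // right shift such that M ~= output_multiplier * 2^-31 * 2^-output_shift.
        // Illustrative values (not from the source): scale_src = 0.5, scale_weights = 0.25,
        // scale_dst = 1.0 give M = 0.125, stored as output_multiplier = 2^30 (0.5 in Q0.31)
        // with output_shift = 2, since 0.125 = 0.5 * 2^-2.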
        const float multiplier        = (iq_unif.scale * wq_unif.scale) / output_quant_info.scale;
        int         output_multiplier = 0;
        int         output_shift      = 0;
        ARM_COMPUTE_RETURN_ON_ERROR(quantization::calculate_quantized_multiplier(multiplier, &output_multiplier, &output_shift));

        PixelValue type_min{};
        PixelValue type_max{};
        std::tie(type_min, type_max) = get_min_max(data_type);

        if(activation_info.enabled())
        {
            std::tie(type_min, type_max) = get_quantized_activation_min_max(activation_info, data_type, output_quant_info);
        }
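        // Narrowing [type_min, type_max] to the quantized activation range makes the
        // output-stage clamp below apply the fused activation (e.g. a bounded ReLU)
        // without a separate activation kernel.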

        // Set the GEMMLowp output stage info
        gemmlowp_output_stage.gemmlowp_offset     = output_quant_info.offset;
        gemmlowp_output_stage.gemmlowp_multiplier = output_multiplier;
        gemmlowp_output_stage.gemmlowp_shift      = output_shift;
        gemmlowp_output_stage.gemmlowp_multipliers.push_back(output_multiplier);
        gemmlowp_output_stage.gemmlowp_shifts.push_back(output_shift);
        type_min.get(gemmlowp_output_stage.gemmlowp_min_bound);
        type_max.get(gemmlowp_output_stage.gemmlowp_max_bound);
    }

    return Status{};
}

Status validate_mm(const ITensorInfo &src, const ITensorInfo &weights, const ITensorInfo *bias, const ITensorInfo &dst, const FullyConnectedLayerInfo &fc_info)
{
    GEMMLowpOutputStageInfo gemmlowp_output_stage;
    ARM_COMPUTE_RETURN_ON_ERROR(construct_gemmlowp_output_stage(src, weights, dst, gemmlowp_output_stage, fc_info.activation_info));

    const GEMMInfo &gemm_info = GEMMInfo(false,                           // is_a_reshaped
                                         false,                           // is_b_reshaped
                                         true,                            // reshape_b_only_on_first_run
                                         0,                               // depth_output_gemm3d
                                         false,                           // reinterpret_input_as_3d
                                         fc_info.retain_internal_weights, // retain_internal_weights
                                         gemmlowp_output_stage,           // gemmlowp_output_stage
                                         fc_info.fp_mixed_precision,      // fp_mixed_precision
                                         false,                           // fast_math
                                         true,                            // broadcast_bias
                                         ActivationLayerInfo());          // activation_info
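
    // An empty ActivationLayerInfo is passed to the GEMM here: for the quantized path the
    // activation has already been folded into the clamp bounds of gemmlowp_output_stage.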

    if(is_data_type_quantized_asymmetric(src.data_type()))
    {
        const UniformQuantizationInfo iq_info = src.quantization_info().uniform();
        const UniformQuantizationInfo wq_info = weights.quantization_info().uniform();

        // The quantized matrix multiplication needs negative offsets, so change the QuantizationInfo:
        // extract and negate the src and weights offsets
        const QuantizationInfo src_quantization_info(iq_info.scale, -iq_info.offset);
        const QuantizationInfo weights_quantization_info(wq_info.scale, -wq_info.offset);
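
        // Rationale: with an asymmetric scheme real_value = scale * (q - offset), so storing
        // -offset in the cloned infos gives the GEMMLowp core the sign convention it expects
        // for the zero-point correction terms.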

        // Validate gemmlowp function
        ARM_COMPUTE_RETURN_ON_ERROR(ClGemmLowpMatrixMultiplyCore::validate(&src.clone()->set_quantization_info(src_quantization_info),
                                                                           &weights.clone()->set_quantization_info(weights_quantization_info),
                                                                           bias,
                                                                           &dst,
                                                                           gemm_info));
    }
    else
    {
        ARM_COMPUTE_RETURN_ON_ERROR(ClGemm::validate(&src, &weights, bias, &dst, 1.f, 1.f, gemm_info));
    }

    return Status{};
}
} // namespace

ClFullyConnected::ClFullyConnected()
    : _convert_weights(nullptr),
      _flatten(nullptr),
      _reshape_weights(nullptr),
      _mm_gemm(nullptr),
      _mm_gemmlowp(nullptr),
      _aux_mem(Count)
{
}

ClFullyConnected::~ClFullyConnected() = default;

void ClFullyConnected::configure_mm(const CLCompileContext &compile_context, ITensorInfo *src, ITensorInfo *weights, ITensorInfo *bias, ITensorInfo *dst,
                                    const FullyConnectedLayerInfo &fc_info)
{
    GEMMLowpOutputStageInfo gemmlowp_output_stage;
    construct_gemmlowp_output_stage(*src, *weights, *dst, gemmlowp_output_stage, fc_info.activation_info);

    const GEMMInfo &gemm_info = GEMMInfo(false,                           // is_a_reshaped
                                         false,                           // is_b_reshaped
                                         true,                            // reshape_b_only_on_first_run
                                         0,                               // depth_output_gemm3d
                                         false,                           // reinterpret_input_as_3d
                                         fc_info.retain_internal_weights, // retain_internal_weights
                                         gemmlowp_output_stage,           // gemmlowp_output_stage
                                         fc_info.fp_mixed_precision,      // fp_mixed_precision
                                         false,                           // fast_math
                                         true,                            // broadcast_bias
                                         fc_info.activation_info,         // activation_info
                                         fc_info.constant_weights);       // constant_weights

    if(_is_quantized)
    {
        // The quantized matrix multiplication needs negative offsets, so change the QuantizationInfo:
        // extract and negate the input and weights offsets
        const QuantizationInfo src_quantization_info     = src->quantization_info();
        const QuantizationInfo weights_quantization_info = weights->quantization_info();

        TensorInfo src_info     = src->clone()->set_quantization_info(src_quantization_info);
        TensorInfo weights_info = weights->clone()->set_quantization_info(weights_quantization_info);

        src_info.set_quantization_info(QuantizationInfo(src_quantization_info.uniform().scale, -src_quantization_info.uniform().offset));
        weights_info.set_quantization_info(QuantizationInfo(weights_quantization_info.uniform().scale, -weights_quantization_info.uniform().offset));

        // Configure gemmlowp function
        _mm_gemmlowp = std::make_unique<ClGemmLowpMatrixMultiplyCore>();
        _mm_gemmlowp->configure(compile_context, &src_info, &weights_info, bias, dst, gemm_info);
    }
    else
    {
        // Configure matrix multiply kernel
        _mm_gemm = std::make_unique<ClGemm>();
        _mm_gemm->configure(compile_context, src, weights, bias, dst, 1.f, 1.f, gemm_info);
    }
}

void ClFullyConnected::configure_conv_fc(const CLCompileContext &compile_context, ITensorInfo *src, ITensorInfo *weights, ITensorInfo *bias, ITensorInfo *dst,
                                         const FullyConnectedLayerInfo &fc_info)
{
    ARM_COMPUTE_ERROR_ON((weights->dimension(1) != (src->dimension(0) * src->dimension(1) * src->dimension(2))));

    // If the fully connected layer is called after a convolution layer, the input tensor must be linearized

    // Initialize output tensor for flatten
    _flattened_src = src->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(compute_flatten_shape(src)).set_data_layout(DataLayout::NCHW);

    // Configure flatten kernel
    _flatten = std::make_unique<ClFlatten>();
    _flatten->configure(compile_context, src, &_flattened_src);

    // Configure matrix multiply kernel
    configure_mm(compile_context, &_flattened_src, weights, bias, dst, fc_info);
}

void ClFullyConnected::configure_fc_fc(const CLCompileContext &compile_context, ITensorInfo *src, ITensorInfo *weights, ITensorInfo *bias, ITensorInfo *dst,
                                       const FullyConnectedLayerInfo &fc_info)
{
    ARM_COMPUTE_ERROR_ON(src->dimension(0) != weights->dimension(1));

    // Configure matrix multiply kernel
    configure_mm(compile_context, src, weights, bias, dst, fc_info);
}

void ClFullyConnected::configure(const CLCompileContext &compile_context, ITensorInfo *src, ITensorInfo *weights, ITensorInfo *biases, ITensorInfo *dst,
                                 FullyConnectedLayerInfo fc_info)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(src, weights, dst);

    // Perform validate step
    ARM_COMPUTE_ERROR_THROW_ON(ClFullyConnected::validate(src, weights, biases, dst, fc_info));

    _are_weights_converted = true;
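    // Weights that need no transposing count as already reshaped; otherwise trust the
    // caller's are_weights_reshaped flag.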
    _are_weights_reshaped = fc_info.transpose_weights ? fc_info.are_weights_reshaped : true;
    _is_fc_after_conv     = true;
    _is_quantized         = is_data_type_quantized_asymmetric(src->data_type());
    _is_prepared          = fc_info.retain_internal_weights;
    _weights_to_use       = TensorInfo(*weights);
    _weights_to_use_idx   = ACL_SRC_1;

    // With the Fully Connected layer we can have 4 different cases:
    //  1) Convolution layer -> Fully Connected layer without batches
    //  2) Fully Connected layer -> Fully Connected layer without batches
    //  3) Convolution layer -> Fully Connected layer with batches
    //  4) Fully Connected layer -> Fully Connected layer with batches

    // Check if we have a fully connected layer with batches
    const bool is_batched_fc_layer = dst->dimension(1) > 1;
    if(is_batched_fc_layer)
    {
        _is_fc_after_conv = (TensorShape::num_max_dimensions >= 4) && (std::equal(src->tensor_shape().cbegin() + 3,
                                                                                  src->tensor_shape().cend(),
                                                                                  dst->tensor_shape().cbegin() + 1));
    }
    else
    {
        _is_fc_after_conv = src->num_dimensions() > 1;
    }
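
    // Illustrative shapes (not from the source): a batched conv output of
    // [W, H, C, N] = [7, 7, 64, 8] feeding dst = [4096, 8] matches on the batch tail
    // (src dimensions 3+ equal dst dimensions 1+), so the layer is fc-after-conv and each
    // [7, 7, 64] volume is flattened; a 2D src [in_features, N] from a previous FC layer
    // fails the comparison and takes the fc-after-fc path.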

    ITensorInfo *weights_used = weights;

    // Reshape weights if needed
    if(!_are_weights_reshaped)
    {
        // Reshape the weights
        _reshape_weights = std::make_unique<ClTranspose>();
        _reshape_weights->configure(compile_context, weights, &_reshaped_weights);
        weights_used        = &_reshaped_weights;
        _weights_to_use_idx = offset_int_vec(TransposedWeights);
    }

    // Convert weights if needed
    if(_is_fc_after_conv && (src->data_layout() != fc_info.weights_trained_layout))
    {
        // Convert weights
        _convert_weights = std::make_unique<ClConvertFullyConnectedWeights>();
        _convert_weights->configure(compile_context,
                                    weights_used,
                                    &_converted_weights,
                                    src->tensor_shape(),
                                    fc_info.weights_trained_layout);

        weights_used           = &_converted_weights;
        _weights_to_use_idx    = offset_int_vec(ConvertedWeights);
        _are_weights_converted = false;
    }
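    // The conversion re-orders the weights along their input dimension: flattening a conv
    // output enumerates elements in the runtime data layout (NCHW vs NHWC), so weights
    // trained with the other layout must be permuted to stay aligned with the flattened input.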

    if(_is_fc_after_conv)
    {
        // Fully Connected layer after a Convolution Layer without batches
        configure_conv_fc(compile_context, src, weights_used, biases, dst, fc_info);
    }
    else
    {
        // Fully Connected layer after a Fully Connected Layer without batches
        configure_fc_fc(compile_context, src, weights_used, biases, dst, fc_info);
    }
    // Update TensorInfo of final weights used (needs to be done at the end due to padding expansion)
    _weights_to_use = *weights_used;

    // Set auxiliary memory requirements
    auto gemm_mem_req = (_is_quantized) ? _mm_gemmlowp->workspace() : _mm_gemm->workspace();
    for(unsigned int i = 0; i < gemm_mem_req.size(); ++i)
    {
        _aux_mem[i] = gemm_mem_req[i];
    }
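
    // If the GEMM reports persistent workspace buffers in slots 1/2 (its own weight copies),
    // our intermediate weight tensors only need to live until prepare() has finished.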
    if(_aux_mem[1].size > 0 || _aux_mem[2].size > 0) // Persistent weights memory on GEMMs
    {
        // Release permuted weights at the end of prepare as they are further transposed by the assembly dispatch
        _aux_mem[TransposedWeights] = MemoryInfo(offset_int_vec(TransposedWeights), MemoryLifetime::Prepare, _reshaped_weights.total_size());
        _aux_mem[ConvertedWeights]  = MemoryInfo(offset_int_vec(ConvertedWeights), MemoryLifetime::Prepare, _converted_weights.total_size());
    }
    else
    {
        // Keep the weights the GEMM actually consumes alive for the whole run; the others are only needed until the end of prepare
        const auto transposed_wei_lft = (_weights_to_use_idx == offset_int_vec(TransposedWeights)) ? MemoryLifetime::Persistent : MemoryLifetime::Prepare;
        const auto converted_wei_lft  = (_weights_to_use_idx == offset_int_vec(ConvertedWeights)) ? MemoryLifetime::Persistent : MemoryLifetime::Prepare;

        _aux_mem[TransposedWeights] = MemoryInfo(offset_int_vec(TransposedWeights), transposed_wei_lft, _reshaped_weights.total_size());
        _aux_mem[ConvertedWeights]  = MemoryInfo(offset_int_vec(ConvertedWeights), converted_wei_lft, _converted_weights.total_size());
    }
    _aux_mem[FlattenedSrc] = MemoryInfo(offset_int_vec(FlattenedSrc), MemoryLifetime::Temporary, _flattened_src.total_size());
}

Status ClFullyConnected::validate(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *dst,
                                  FullyConnectedLayerInfo fc_info)
{
    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src, weights, dst);
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::F16, DataType::F32);
    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src, weights, dst);
    ARM_COMPUTE_RETURN_ERROR_ON(weights->num_dimensions() > 2);
    ARM_COMPUTE_RETURN_ERROR_ON(fc_info.activation_info.enabled() && is_data_type_quantized(src->data_type())
                                && fc_info.activation_info.activation() != ActivationLayerInfo::ActivationFunction::RELU
                                && fc_info.activation_info.activation() != ActivationLayerInfo::ActivationFunction::BOUNDED_RELU
                                && fc_info.activation_info.activation() != ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU);
    ARM_COMPUTE_RETURN_ERROR_ON(!fc_info.constant_weights && (!fc_info.are_weights_reshaped || fc_info.transpose_weights));

    bool weights_reshaped = fc_info.transpose_weights ? fc_info.are_weights_reshaped : true;
    bool is_fc_after_conv = true;

    const ITensorInfo &flatten_src       = TensorInfo(src->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(compute_flatten_shape(src)).set_data_layout(DataLayout::NCHW));
    const ITensorInfo &reshaped_weights  = TensorInfo(weights->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(compute_transposed_shape(*weights)));
    const ITensorInfo &converted_weights = weights_reshaped ? TensorInfo(weights->clone()->set_is_resizable(true).reset_padding()) : TensorInfo(*reshaped_weights.clone());

    // With the Fully Connected layer we can have 4 different cases:
    //  1) Convolution layer -> Fully Connected layer without batches
    //  2) Fully Connected layer -> Fully Connected layer without batches
    //  3) Convolution layer -> Fully Connected layer with batches
    //  4) Fully Connected layer -> Fully Connected layer with batches

    const ITensorInfo *src_to_use     = src;
    const ITensorInfo *weights_to_use = weights;

    // Check if we have a fully connected layer with batches
    const bool is_batched_fc_layer = dst->dimension(1) > 1;
    if(is_batched_fc_layer)
    {
        is_fc_after_conv = (TensorShape::num_max_dimensions >= 4) && (std::equal(src->tensor_shape().cbegin() + 3,
                                                                                 src->tensor_shape().cend(),
                                                                                 dst->tensor_shape().cbegin() + 1));
    }
    else
    {
        is_fc_after_conv = src->num_dimensions() > 1;
    }

    if(!weights_reshaped)
    {
        // Validate reshape weights kernel
        ARM_COMPUTE_RETURN_ON_ERROR(ClTranspose::validate(weights, &reshaped_weights));
        weights_to_use = &reshaped_weights;
    }

    if(is_fc_after_conv && (src->data_layout() != fc_info.weights_trained_layout))
    {
        // Validate convert weights kernel
        ARM_COMPUTE_RETURN_ON_ERROR(ClConvertFullyConnectedWeights::validate(weights_to_use,
                                                                             &converted_weights,
                                                                             src->tensor_shape(),
                                                                             fc_info.weights_trained_layout));
        weights_to_use = &converted_weights;
    }

    if(is_fc_after_conv)
    {
        // Fully Connected layer after a Convolution Layer without batches
        ARM_COMPUTE_RETURN_ERROR_ON((weights_to_use->dimension(1) != (src->dimension(0) * src->dimension(1) * src->dimension(2))));

        // Validate flatten kernel
        ARM_COMPUTE_RETURN_ON_ERROR(ClFlatten::validate(src, &flatten_src));
        src_to_use = &flatten_src;
    }
    else
    {
        // Fully Connected layer after a Fully Connected Layer without batches
        ARM_COMPUTE_RETURN_ERROR_ON(src->dimension(0) != weights_to_use->dimension(1));
    }

    // Validate matrix multiply kernel
    ARM_COMPUTE_RETURN_ON_ERROR(validate_mm(*src_to_use, *weights_to_use, biases, *dst, fc_info));

    return Status{};
}

void ClFullyConnected::run(ITensorPack &tensors)
{
    prepare(tensors);

    auto src = tensors.get_const_tensor(ACL_SRC_0);

    CLAuxTensorHandler flattened_src(offset_int_vec(FlattenedSrc), _flattened_src, tensors, false);
    CLAuxTensorHandler weights(_weights_to_use_idx, _weights_to_use, tensors, false);
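
    // The handlers bind the auxiliary workspace buffers supplied through `tensors` to the
    // TensorInfo computed at configure time, so they can be used as regular tensors below.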

    // Linearize input if it comes from a convolutional layer
    if(_is_fc_after_conv)
    {
        ITensorPack flatten_pack{ { ACL_SRC, src }, { ACL_DST, flattened_src.get() } };
        _flatten->run(flatten_pack);
    }

    ITensorPack gemm_pack = tensors;
    gemm_pack.add_const_tensor(ACL_SRC_0, (_is_fc_after_conv) ? flattened_src.get() : src);
    if(_weights_to_use_idx != ACL_SRC_1)
    {
        gemm_pack.add_const_tensor(ACL_SRC_1, weights.get());
    }

    // Run matrix multiply
    if(_is_quantized)
    {
        _mm_gemmlowp->run(gemm_pack);
    }
    else
    {
        _mm_gemm->run(gemm_pack);
    }
}

void ClFullyConnected::prepare(ITensorPack &tensors)
{
    if(!_is_prepared)
    {
        auto weights = tensors.get_const_tensor(ACL_SRC_1);

        CLAuxTensorHandler reshaped_weights(offset_int_vec(TransposedWeights), _reshaped_weights, tensors, false);
        CLAuxTensorHandler converted_weights(offset_int_vec(ConvertedWeights), _converted_weights, tensors, false);

        // Pointer to current weights
        const ITensor *cur_weights = weights;

        // Reshape of the weights if needed (happens only once)
        if(!_are_weights_reshaped)
        {
            // Run reshape weights kernel and mark weights as unused
            ITensorPack transpose_pack{ { ACL_SRC, weights }, { ACL_DST, reshaped_weights.get() } };
            _reshape_weights->run(transpose_pack);

            cur_weights->mark_as_unused();
            cur_weights = reshaped_weights.get();

            _are_weights_reshaped = true;
        }

        // Convert weights if needed (happens only once)
        if(!_are_weights_converted)
        {
            ITensorPack convert_pack{ { ACL_SRC, cur_weights }, { ACL_DST, converted_weights.get() } };
            _convert_weights->run(convert_pack);

            cur_weights->mark_as_unused();
            cur_weights = converted_weights.get();

            _are_weights_converted = true;
        }

        tensors.add_const_tensor(ACL_SRC_1, cur_weights);

        // Run the GEMM's prepare step and release unused weights
        if(!_is_quantized)
        {
            _mm_gemm->prepare(tensors);
        }
        else
        {
            _mm_gemmlowp->prepare(tensors);
        }
        _is_prepared = true;
    }
}

experimental::MemoryRequirements ClFullyConnected::workspace() const
{
    return _aux_mem;
}
} // namespace opencl
} // namespace arm_compute