ClFullyConnected.cpp (Compute Library 22.11)
/*
 * Copyright (c) 2017-2021 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "src/gpu/cl/operators/ClFullyConnected.h"

#include "arm_compute/core/Validate.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "arm_compute/core/utils/quantization/AsymmHelpers.h"

#include "src/core/helpers/MemoryHelpers.h"
#include "src/gpu/cl/operators/ClConvertFullyConnectedWeights.h"
#include "src/gpu/cl/operators/ClFlatten.h"
#include "src/gpu/cl/operators/ClGemm.h"
#include "src/gpu/cl/operators/ClGemmLowpMatrixMultiplyCore.h"
#include "src/gpu/cl/operators/ClTranspose.h"
#include "src/gpu/cl/utils/ClAuxTensorHandler.h"

#include "src/common/utils/Log.h"
#include "support/Cast.h"

#include <algorithm>

namespace arm_compute
{
namespace opencl
{
using namespace arm_compute::experimental;
using namespace arm_compute::misc::shape_calculator;

namespace
{
Status construct_gemmlowp_output_stage(const ITensorInfo &src, const ITensorInfo &weights, const ITensorInfo &dst,
                                       GEMMLowpOutputStageInfo &gemmlowp_output_stage, ActivationLayerInfo activation_info)
{
    gemmlowp_output_stage.type                = GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT;
    gemmlowp_output_stage.gemmlowp_offset     = 0;
    gemmlowp_output_stage.gemmlowp_multiplier = 0;
    gemmlowp_output_stage.gemmlowp_shift      = 0;

    const auto data_type = src.data_type();

    // Configure output stage for quantized case
    if(is_data_type_quantized_asymmetric(data_type))
    {
        const QuantizationInfo        oq_info = dst.quantization_info();
        const UniformQuantizationInfo iq_unif = src.quantization_info().uniform();
        const UniformQuantizationInfo wq_unif = weights.quantization_info().uniform();
        const UniformQuantizationInfo oq_unif = oq_info.uniform();

        const auto output_quant_info = (dst.total_size() == 0) ? iq_unif : oq_unif;

        const float multiplier        = (iq_unif.scale * wq_unif.scale) / output_quant_info.scale;
        int         output_multiplier = 0;
        int         output_shift      = 0;
        ARM_COMPUTE_RETURN_ON_ERROR(quantization::calculate_quantized_multiplier(multiplier, &output_multiplier, &output_shift));
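        // The helper above decomposes the real rescale factor so that
        //   multiplier ~= output_multiplier * 2^(-output_shift),
        // which lets the output stage requantize the S32 accumulators using
        // integer-only arithmetic.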

        PixelValue type_min{};
        PixelValue type_max{};
        std::tie(type_min, type_max) = get_min_max(data_type);

        if(activation_info.enabled())
        {
            std::tie(type_min, type_max) = get_quantized_activation_min_max(activation_info, data_type, output_quant_info);
        }

        // Set the GEMMLowp output stage info
        gemmlowp_output_stage.gemmlowp_offset      = output_quant_info.offset;
        gemmlowp_output_stage.gemmlowp_multiplier  = output_multiplier;
        gemmlowp_output_stage.gemmlowp_shift       = output_shift;
        gemmlowp_output_stage.gemmlowp_multipliers.push_back(output_multiplier);
        gemmlowp_output_stage.gemmlowp_shifts.push_back(output_shift);
        type_min.get(gemmlowp_output_stage.gemmlowp_min_bound);
        type_max.get(gemmlowp_output_stage.gemmlowp_max_bound);
    }

    return Status{};
}

Status validate_mm(const ITensorInfo &src, const ITensorInfo &weights, const ITensorInfo *bias, const ITensorInfo &dst, const FullyConnectedLayerInfo &fc_info)
{
    GEMMLowpOutputStageInfo gemmlowp_output_stage;
    ARM_COMPUTE_RETURN_ON_ERROR(construct_gemmlowp_output_stage(src, weights, dst, gemmlowp_output_stage, fc_info.activation_info));

    const GEMMInfo &gemm_info = GEMMInfo(false,                           // is_a_reshaped
                                         false,                           // is_b_reshaped
                                         true,                            // reshape_b_only_on_first_run
                                         0,                               // depth_output_gemm3d
                                         false,                           // reinterpret_input_as_3d
                                         fc_info.retain_internal_weights, // retain_internal_weights
                                         gemmlowp_output_stage,           // gemmlowp_output_stage
                                         fc_info.fp_mixed_precision,      // fp_mixed_precision
                                         false,                           // fast_math
                                         true,                            // broadcast_bias
                                         ActivationLayerInfo());          // activation_info

    if(is_data_type_quantized_asymmetric(src.data_type()))
    {
        const UniformQuantizationInfo iq_info = src.quantization_info().uniform();
        const UniformQuantizationInfo wq_info = weights.quantization_info().uniform();

        // Since we need negative offsets for the matrix multiplication, we need to change QuantizationInfo()
        // Extract and negate src and weights offset
        const QuantizationInfo src_quantization_info(iq_info.scale, -iq_info.offset);
        const QuantizationInfo weights_quantization_info(wq_info.scale, -wq_info.offset);

        // Validate gemmlowp function
        ARM_COMPUTE_RETURN_ON_ERROR(ClGemmLowpMatrixMultiplyCore::validate(&src.clone()->set_quantization_info(src_quantization_info),
                                                                           &weights.clone()->set_quantization_info(weights_quantization_info),
                                                                           bias,
                                                                           &dst,
                                                                           gemm_info));
    }
    else
    {
        ARM_COMPUTE_RETURN_ON_ERROR(ClGemm::validate(&src, &weights, bias, &dst, 1.f, 1.f, gemm_info));
    }

    return Status{};
}
} // namespace

ClFullyConnected::ClFullyConnected()
    : _convert_weights(nullptr),
      _flatten(nullptr),
      _reshape_weights(nullptr),
      _mm_gemm(nullptr),
      _mm_gemmlowp(nullptr),
      _aux_mem(Count)
{
}

ClFullyConnected::~ClFullyConnected() = default;

void ClFullyConnected::configure_mm(const CLCompileContext &compile_context, ITensorInfo *src, ITensorInfo *weights, ITensorInfo *bias, ITensorInfo *dst,
                                    const FullyConnectedLayerInfo &fc_info)
{
    GEMMLowpOutputStageInfo gemmlowp_output_stage;
    construct_gemmlowp_output_stage(*src, *weights, *dst, gemmlowp_output_stage, fc_info.activation_info);

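    // reshape_b_only_on_first_run lets the GEMM reshape and cache the RHS (the
    // weights) once and reuse it on every run; broadcast_bias makes it add the
    // 1D bias vector to every row of the output.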
    const GEMMInfo &gemm_info = GEMMInfo(false,                           // is_a_reshaped
                                         false,                           // is_b_reshaped
                                         true,                            // reshape_b_only_on_first_run
                                         0,                               // depth_output_gemm3d
                                         false,                           // reinterpret_input_as_3d
                                         fc_info.retain_internal_weights, // retain_internal_weights
                                         gemmlowp_output_stage,           // gemmlowp_output_stage
                                         fc_info.fp_mixed_precision,      // fp_mixed_precision
                                         false,                           // fast_math
                                         true,                            // broadcast_bias
                                         fc_info.activation_info);        // activation_info

    if(_is_quantized)
    {
        // Since we need negative offsets for the matrix multiplication, we need to change QuantizationInfo()
        // Extract and negate input and weights offset
        const QuantizationInfo src_quantization_info     = src->quantization_info();
        const QuantizationInfo weights_quantization_info = weights->quantization_info();

        TensorInfo src_info     = src->clone()->set_quantization_info(src_quantization_info);
        TensorInfo weights_info = weights->clone()->set_quantization_info(weights_quantization_info);

        src_info.set_quantization_info(QuantizationInfo(src_quantization_info.uniform().scale, -src_quantization_info.uniform().offset));
        weights_info.set_quantization_info(QuantizationInfo(weights_quantization_info.uniform().scale, -weights_quantization_info.uniform().offset));

        // Configure gemmlowp function
        _mm_gemmlowp = std::make_unique<ClGemmLowpMatrixMultiplyCore>();
        _mm_gemmlowp->configure(compile_context, &src_info, &weights_info, bias, dst, gemm_info);
    }
    else
    {
        // Configure matrix multiply kernel
        _mm_gemm = std::make_unique<ClGemm>();
        _mm_gemm->configure(compile_context, src, weights, bias, dst, 1.f, 1.f, gemm_info);
    }
}

void ClFullyConnected::configure_conv_fc(const CLCompileContext &compile_context, ITensorInfo *src, ITensorInfo *weights, ITensorInfo *bias, ITensorInfo *dst,
                                         const FullyConnectedLayerInfo &fc_info)
{
    ARM_COMPUTE_ERROR_ON((weights->dimension(1) != (src->dimension(0) * src->dimension(1) * src->dimension(2))));

    // If the fully connected layer is called after a convolution layer, the input tensor must be linearized

    // Initialize output tensor for flatten
    _flattened_src = src->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(compute_flatten_shape(src)).set_data_layout(DataLayout::NCHW);
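    // e.g. a 7x7x48 activation tensor coming out of a convolution is flattened
    // into a 1D tensor of 7 * 7 * 48 = 2352 elements (per batch) so it can feed
    // the matrix multiplication.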

    // Configure flatten kernel
    _flatten = std::make_unique<ClFlatten>();
    _flatten->configure(compile_context, src, &_flattened_src);

    // Configure matrix multiply kernel
    configure_mm(compile_context, &_flattened_src, weights, bias, dst, fc_info);
}

void ClFullyConnected::configure_fc_fc(const CLCompileContext &compile_context, ITensorInfo *src, ITensorInfo *weights, ITensorInfo *bias, ITensorInfo *dst,
                                       const FullyConnectedLayerInfo &fc_info)
{
    ARM_COMPUTE_ERROR_ON(src->dimension(0) != weights->dimension(1));

    // Configure matrix multiply kernel
    configure_mm(compile_context, src, weights, bias, dst, fc_info);
}

void ClFullyConnected::configure(const CLCompileContext &compile_context, ITensorInfo *src, ITensorInfo *weights, ITensorInfo *biases, ITensorInfo *dst,
                                 FullyConnectedLayerInfo fc_info)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(src, weights, dst);

    // Perform validate step
    ARM_COMPUTE_ERROR_THROW_ON(ClFullyConnected::validate(src, weights, biases, dst, fc_info));
    ARM_COMPUTE_LOG_PARAMS(src, weights, biases, dst, fc_info);

    _are_weights_converted = true;
    _are_weights_reshaped  = fc_info.transpose_weights ? fc_info.are_weights_reshaped : true;
    _is_fc_after_conv      = true;
    _is_quantized          = is_data_type_quantized_asymmetric(src->data_type());
    _is_prepared           = fc_info.retain_internal_weights;
    _weights_to_use        = TensorInfo(*weights);
    _weights_to_use_idx    = ACL_SRC_1;

    // With the Fully Connected layer we can have 4 different cases:
    //  1) Convolution layer -> Fully Connected layer without batches
    //  2) Fully Connected layer -> Fully Connected layer without batches
    //  3) Convolution layer -> Fully Connected layer with batches
    //  4) Fully Connected layer -> Fully Connected layer with batches

    // Check if we have a fully connected layer with batches
    const bool is_batched_fc_layer = dst->dimension(1) > 1;
    if(is_batched_fc_layer)
    {
        _is_fc_after_conv = (TensorShape::num_max_dimensions >= 4) && (std::equal(src->tensor_shape().cbegin() + 3,
                                                                                  src->tensor_shape().cend(),
                                                                                  dst->tensor_shape().cbegin() + 1));
    }
    else
    {
        _is_fc_after_conv = src->num_dimensions() > 1;
    }
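    // e.g. with a batched src of shape [7,7,48,N] and a dst of shape [10,N],
    // the trailing src dimensions match the dst batch dimensions, so the input
    // is treated as convolution output and will be flattened before the GEMM.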

    ITensorInfo *weights_used = weights;

    // Reshape weights if needed
    if(!_are_weights_reshaped)
    {
        // Reshape the weights
        _reshape_weights = std::make_unique<ClTranspose>();
        _reshape_weights->configure(compile_context, weights, &_reshaped_weights);
        weights_used        = &_reshaped_weights;
        _weights_to_use_idx = offset_int_vec(TransposedWeights);
    }

    // Convert weights if needed
    if(_is_fc_after_conv && (src->data_layout() != fc_info.weights_trained_layout))
    {
        // Convert weights
        _convert_weights = std::make_unique<ClConvertFullyConnectedWeights>();
        _convert_weights->configure(compile_context,
                                    weights_used,
                                    &_converted_weights,
                                    src->tensor_shape(),
                                    fc_info.weights_trained_layout);

        weights_used           = &_converted_weights;
        _weights_to_use_idx    = offset_int_vec(ConvertedWeights);
        _are_weights_converted = false;
    }

    if(_is_fc_after_conv)
    {
        // Fully Connected layer after a Convolution Layer without batches
        configure_conv_fc(compile_context, src, weights_used, biases, dst, fc_info);
    }
    else
    {
        // Fully Connected layer after a Fully Connected Layer without batches
        configure_fc_fc(compile_context, src, weights_used, biases, dst, fc_info);
    }
    // Update TensorInfo of final weights used (Need to be done in the end due to padding expansion)
    _weights_to_use = *weights_used;

    // Set auxiliary memory requirements
    auto gemm_mem_req = (_is_quantized) ? _mm_gemmlowp->workspace() : _mm_gemm->workspace();
    for(unsigned int i = 0; i < gemm_mem_req.size(); ++i)
    {
        _aux_mem[i] = gemm_mem_req[i];
    }
    if(_aux_mem[1].size > 0 || _aux_mem[2].size > 0) // Persistent weights memory on GEMMs
    {
        // Release permuted weights at the end of prepare as the GEMM retains its own reshaped copy of them
        _aux_mem[TransposedWeights] = MemoryInfo(offset_int_vec(TransposedWeights), MemoryLifetime::Prepare, _reshaped_weights.total_size());
        _aux_mem[ConvertedWeights]  = MemoryInfo(offset_int_vec(ConvertedWeights), MemoryLifetime::Prepare, _converted_weights.total_size());
    }
    else
    {
        // Keep the weights actually consumed by the GEMM alive across runs; release any intermediate weights at the end of prepare
        const auto transposed_wei_lft = (_weights_to_use_idx == offset_int_vec(TransposedWeights)) ? MemoryLifetime::Persistent : MemoryLifetime::Prepare;
        const auto converted_wei_lft  = (_weights_to_use_idx == offset_int_vec(ConvertedWeights)) ? MemoryLifetime::Persistent : MemoryLifetime::Prepare;

        _aux_mem[TransposedWeights] = MemoryInfo(offset_int_vec(TransposedWeights), transposed_wei_lft, _reshaped_weights.total_size());
        _aux_mem[ConvertedWeights]  = MemoryInfo(offset_int_vec(ConvertedWeights), converted_wei_lft, _converted_weights.total_size());
    }
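    // Lifetime summary: Persistent buffers survive across runs, Prepare buffers
    // are freed once prepare() has finished, and Temporary buffers (the
    // flattened input below) only live for the duration of a single run().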
    _aux_mem[FlattenedSrc] = MemoryInfo(offset_int_vec(FlattenedSrc), MemoryLifetime::Temporary, _flattened_src.total_size());
}
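
// A minimal usage sketch (illustrative only; names and shapes below are
// assumptions, not part of this file): an FP32 layer mapping 128 inputs to
// 16 outputs.
//
//   ClFullyConnected fc;
//   TensorInfo src_nfo(TensorShape(128U), 1, DataType::F32);
//   TensorInfo wei_nfo(TensorShape(128U, 16U), 1, DataType::F32); // transposed internally
//   TensorInfo bia_nfo(TensorShape(16U), 1, DataType::F32);
//   TensorInfo dst_nfo(TensorShape(16U), 1, DataType::F32);
//   fc.configure(CLKernelLibrary::get().get_compile_context(), &src_nfo, &wei_nfo, &bia_nfo, &dst_nfo, FullyConnectedLayerInfo());
//
//   // At run time the caller provides the actual tensors in an ITensorPack:
//   //   ITensorPack pack{ { ACL_SRC_0, &src }, { ACL_SRC_1, &wei }, { ACL_SRC_2, &bia }, { ACL_DST, &dst } };
//   //   fc.run(pack);
//
// In practice the public CLFullyConnectedLayer wrapper is used instead, since
// it also allocates the auxiliary workspace reported by workspace().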

Status ClFullyConnected::validate(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *dst,
                                  FullyConnectedLayerInfo fc_info)
{
    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src, weights, dst);
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::F16, DataType::F32);
    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src, weights, dst);
    ARM_COMPUTE_RETURN_ERROR_ON(weights->num_dimensions() > 2);
    ARM_COMPUTE_RETURN_ERROR_ON(fc_info.activation_info.enabled() && is_data_type_quantized(src->data_type()) && fc_info.activation_info.activation() != ActivationLayerInfo::ActivationFunction::RELU
                                && fc_info.activation_info.activation() != ActivationLayerInfo::ActivationFunction::BOUNDED_RELU && fc_info.activation_info.activation() != ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU);
    ARM_COMPUTE_RETURN_ERROR_ON(!weights->are_values_constant() && (!fc_info.are_weights_reshaped || fc_info.transpose_weights));

    bool weights_reshaped = fc_info.transpose_weights ? fc_info.are_weights_reshaped : true;
    bool is_fc_after_conv = true;

    const ITensorInfo &flatten_src       = TensorInfo(src->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(compute_flatten_shape(src)).set_data_layout(DataLayout::NCHW));
    const ITensorInfo &reshaped_weights  = TensorInfo(weights->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(compute_transposed_shape(*weights)));
    const ITensorInfo &converted_weights = weights_reshaped ? TensorInfo(weights->clone()->set_is_resizable(true).reset_padding()) : TensorInfo(*reshaped_weights.clone());
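    // These TensorInfos mirror the auxiliary tensors that configure() would
    // create, so the whole operator chain can be validated without allocating
    // any memory.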

    // With the Fully Connected layer we can have 4 different cases:
    //  1) Convolution layer -> Fully Connected layer without batches
    //  2) Fully Connected layer -> Fully Connected layer without batches
    //  3) Convolution layer -> Fully Connected layer with batches
    //  4) Fully Connected layer -> Fully Connected layer with batches

    const ITensorInfo *src_to_use     = src;
    const ITensorInfo *weights_to_use = weights;

    if(biases != nullptr)
    {
        ARM_COMPUTE_RETURN_ERROR_ON(biases->num_dimensions() > 1);
        if(is_data_type_quantized(src->data_type()))
        {
            ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(biases, 1, DataType::S32);
        }
        else
        {
            ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src, biases);
        }
    }

    // Check if we have a fully connected layer with batches
    const bool is_batched_fc_layer = dst->dimension(1) > 1;
    if(is_batched_fc_layer)
    {
        is_fc_after_conv = (TensorShape::num_max_dimensions >= 4) && (std::equal(src->tensor_shape().cbegin() + 3,
                                                                                 src->tensor_shape().cend(),
                                                                                 dst->tensor_shape().cbegin() + 1));
    }
    else
    {
        is_fc_after_conv = src->num_dimensions() > 1;
    }

    if(!weights_reshaped)
    {
        // Validate reshape weights kernel
        ARM_COMPUTE_RETURN_ON_ERROR(ClTranspose::validate(weights, &reshaped_weights));
        weights_to_use = &reshaped_weights;
    }

    if(is_fc_after_conv && (src->data_layout() != fc_info.weights_trained_layout))
    {
        // Validate convert weights kernel
        ARM_COMPUTE_RETURN_ON_ERROR(ClConvertFullyConnectedWeights::validate(weights_to_use,
                                                                             &converted_weights,
                                                                             src->tensor_shape(),
                                                                             fc_info.weights_trained_layout));
        weights_to_use = &converted_weights;
    }

    if(is_fc_after_conv)
    {
        // Fully Connected layer after a Convolution Layer without batches
        ARM_COMPUTE_RETURN_ERROR_ON((weights_to_use->dimension(1) != (src->dimension(0) * src->dimension(1) * src->dimension(2))));

        // Validate flatten kernel
        ARM_COMPUTE_RETURN_ON_ERROR(ClFlatten::validate(src, &flatten_src));
        src_to_use = &flatten_src;
    }
    else
    {
        // Fully Connected layer after a Fully Connected Layer without batches
        ARM_COMPUTE_RETURN_ERROR_ON(src->dimension(0) != weights_to_use->dimension(1));
    }

    // Validate matrix multiply kernel
    ARM_COMPUTE_RETURN_ON_ERROR(validate_mm(*src_to_use, *weights_to_use, biases, *dst, fc_info));

    return Status{};
}

void ClFullyConnected::run(ITensorPack &tensors)
{
    prepare(tensors);

    auto src = tensors.get_const_tensor(ACL_SRC_0);

    CLAuxTensorHandler flattened_src(offset_int_vec(FlattenedSrc), _flattened_src, tensors, false);
    CLAuxTensorHandler weights(_weights_to_use_idx, _weights_to_use, tensors, false);

    // Linearize input if it comes from a convolutional layer
    if(_is_fc_after_conv)
    {
        ITensorPack flatten_pack{ { ACL_SRC, src }, { ACL_DST, flattened_src.get() } };
        _flatten->run(flatten_pack);
    }

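    // The GEMM consumes a copy of the caller's pack; ACL_SRC_0/ACL_SRC_1 are
    // overridden below where the flattened input or the reworked weights must
    // replace the user-provided tensors.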
    ITensorPack gemm_pack = tensors;
    gemm_pack.add_const_tensor(ACL_SRC_0, (_is_fc_after_conv) ? flattened_src.get() : src);
    if(_weights_to_use_idx != ACL_SRC_1)
    {
        gemm_pack.add_const_tensor(ACL_SRC_1, weights.get());
    }

    // Run matrix multiply
    if(_is_quantized)
    {
        _mm_gemmlowp->run(gemm_pack);
    }
    else
    {
        _mm_gemm->run(gemm_pack);
    }
}

void ClFullyConnected::prepare(ITensorPack &tensors)
{
    if(!_is_prepared)
    {
        auto weights = tensors.get_const_tensor(ACL_SRC_1);

        CLAuxTensorHandler reshaped_weights(offset_int_vec(TransposedWeights), _reshaped_weights, tensors, false);
        CLAuxTensorHandler converted_weights(offset_int_vec(ConvertedWeights), _converted_weights, tensors, false);

        // Pointer to current weights
        const ITensor *cur_weights = weights;

        // Reshape of the weights if needed (happens only once)
        if(!_are_weights_reshaped)
        {
            // Run reshape weights kernel and mark weights as unused
            ITensorPack transpose_pack{ { ACL_SRC, weights }, { ACL_DST, reshaped_weights.get() } };
            _reshape_weights->run(transpose_pack);

            cur_weights->mark_as_unused();
            cur_weights = reshaped_weights.get();

            _are_weights_reshaped = true;
        }

        // Convert weights if needed (happens only once)
        if(!_are_weights_converted)
        {
            ITensorPack convert_pack{ { ACL_SRC, cur_weights }, { ACL_DST, converted_weights.get() } };
            _convert_weights->run(convert_pack);

            cur_weights->mark_as_unused();
            cur_weights = converted_weights.get();

            _are_weights_converted = true;
        }

        tensors.add_const_tensor(ACL_SRC_1, cur_weights);

        // Run GEMM prepare and release unused weights
        if(!_is_quantized)
        {
            _mm_gemm->prepare(tensors);
        }
        else
        {
            _mm_gemmlowp->prepare(tensors);
        }
        _is_prepared = true;
    }
}
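// Note: prepare() is effectively run-once; the first call performs the weight
// reshape/conversion and the GEMM's own preparation, and _is_prepared then
// short-circuits every subsequent call.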

experimental::MemoryRequirements ClFullyConnected::workspace() const
{
    return _aux_mem;
}
} // namespace opencl
} // namespace arm_compute