Compute Library
 22.05
FullyConnectedLayer.cpp
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2017-2021 Arm Limited.
3  *
4  * SPDX-License-Identifier: MIT
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a copy
7  * of this software and associated documentation files (the "Software"), to
8  * deal in the Software without restriction, including without limitation the
9  * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10  * sell copies of the Software, and to permit persons to whom the Software is
11  * furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in all
14  * copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22  * SOFTWARE.
23  */
24 #include "arm_compute/core/Types.h"
30 #include "tests/NEON/Accessor.h"
32 #include "tests/datasets/FullyConnectedLayerDataset.h"
34 #include "tests/framework/Macros.h"
37 #include "tests/validation/fixtures/FullyConnectedLayerFixture.h"
38 
39 namespace arm_compute
40 {
41 namespace test
42 {
43 namespace validation
44 {
45 namespace
46 {
47 /** Tolerance for float operations */
48 constexpr RelativeTolerance<float> rel_tolerance_f32(0.01f); /**< Relative tolerance value for comparing reference's output against implementation's output for DataType::F32 */
49 constexpr AbsoluteTolerance<float> abs_tolerance_f32(0.001f); /**< Absolute tolerance value for comparing reference's output against implementation's output for DataType::F32 */
50 #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
51 const AbsoluteTolerance<float> abs_tolerance_f16(0.3f); /**< Absolute tolerance value for comparing reference's output against implementation's output for DataType::F16 */
52 const RelativeTolerance<half_float::half> rel_tolerance_f16(half_float::half(0.2f)); /**< Relative tolerance value for comparing reference's output against implementation's output for DataType::F16 */
53 constexpr float tolerance_num_f16 = 0.07f; /**< Tolerance number for FP16 */
54 #endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC*/
55 
56 /** Tolerance for quantized asymmetric operations */
57 constexpr AbsoluteTolerance<uint8_t> tolerance_qasymm8(1);
58 constexpr AbsoluteTolerance<int8_t> tolerance_qasymm8_signed(1);
59 
60 /** CNN data types */
61 const auto CNNDataTypes = framework::dataset::make("DataType",
62 {
63 #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
65 #endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
67 });
68 
69 const auto FullyConnectedParameters = combine(framework::dataset::make("TransposeWeights", { false, true }), framework::dataset::make("ReshapeWeights", { false, true }));
70 
71 const auto QuantizationData = framework::dataset::make("QuantizationInfo",
72 {
73  QuantizationInfo(1.f / 256.f, 10),
74  QuantizationInfo(1.1f, 10),
75 });
76 const auto EmptyActivationFunctionDataset = framework::dataset::make("ActivationInfo",
77 {
78  ActivationLayerInfo(),
79 });
80 const auto ActivationFunctionsDataset = framework::dataset::make("ActivationInfo",
81 {
84  ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 0.75f, 0.25f),
86 });
87 
88 const auto ActivationFunctionsQuantizedDataset = framework::dataset::make("ActivationInfo",
89 {
92  ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 0.75f, 0.25f),
93 });
94 } // namespace
95 
96 TEST_SUITE(NEON)
97 TEST_SUITE(FullyConnectedLayer)
98 
99 /** Test case for memory injection in @ref cpu::CpuFullyConnected.
100  *
101  * Configure the operator once and inject memory at run-time in multiple executions.
102  *
103  * Checks performed in order:
104  * - Both runs compute the same output
105  */
106 TEST_CASE(MemoryInjection, framework::DatasetMode::ALL)
107 {
108  auto fc = std::make_unique<cpu::CpuFullyConnected>();
109  const auto src_info = TensorInfo(TensorShape(8U), 1, DataType::F32, DataLayout::NHWC);
110  const auto weight_info = TensorInfo(TensorShape(8U, 4U), 1, DataType::F32, DataLayout::NHWC);
111  const auto bias_info = TensorInfo(TensorShape(4U), 1, DataType::F32, DataLayout::NHWC);
112  auto dst_info = TensorInfo(TensorShape(4U), 1, DataType::F32, DataLayout::NHWC);
113  const auto fc_info = FullyConnectedLayerInfo{};
114  fc->configure(&src_info, &weight_info, &bias_info, &dst_info, fc_info);
115 
116  // telhs are newly created every call of this lambda function
117  auto src = create_tensor<Tensor>(src_info);
118  auto weight = create_tensor<Tensor>(weight_info);
119  auto bias = create_tensor<Tensor>(bias_info);
120  src.allocator()->allocate();
121  weight.allocator()->allocate();
122  bias.allocator()->allocate();
123 
124  ITensorPack run_pack{ { TensorType::ACL_SRC_0, &src }, { TensorType::ACL_SRC_1, &weight }, { TensorType::ACL_SRC_2, &bias } };
125  ITensorPack prep_pack{ { TensorType::ACL_SRC_1, &weight }, { TensorType::ACL_SRC_2, &bias } };
126 
127  auto mg = MemoryGroup{};
128  auto ws = manage_workspace<Tensor>(fc->workspace(), mg, run_pack, prep_pack);
129 
130  auto run_conv = [&]() -> Tensor
131  {
132  auto dst = create_tensor<Tensor>(dst_info);
133  dst.allocator()->allocate();
134  run_pack.add_tensor(TensorType::ACL_DST, &dst);
135 
136  library->fill_tensor_value(Accessor(src), 1.f);
137  library->fill_tensor_value(Accessor(weight), 2.f);
138  library->fill_tensor_value(Accessor(bias), 3.f);
139  // This operator is configured once and captured by this lambda.
140  fc->prepare(prep_pack);
141  fc->run(run_pack);
142  return dst;
143  };
144  auto result_0 = run_conv();
145  auto result_1 = run_conv();
146  for(size_t i = 0; i < result_0.info()->tensor_shape().total_size(); ++i)
147  {
148  ARM_COMPUTE_EXPECT(((float *)result_0.buffer())[i] == ((float *)result_1.buffer())[i], framework::LogLevel::ERRORS);
149  }
150 }
151 
152 /** Test case for memory injection in @ref NEFullyConnectedLayer.
153  *
154  * Make sure @ref NEFullyConnectedLayer still works through injecting the memory at configure time using the old API.
155  *
156  * Checks performed in order:
157  * - Both runs compute the same output
158  */
159 TEST_CASE(MultipleExecutionWithConfigure, framework::DatasetMode::ALL)
160 {
161  auto fc = std::make_unique<NEFullyConnectedLayer>();
162  const auto src_info = TensorInfo(TensorShape(8U), 1, DataType::F32, DataLayout::NHWC);
163  const auto weight_info = TensorInfo(TensorShape(8U, 4U), 1, DataType::F32, DataLayout::NHWC);
164  const auto bias_info = TensorInfo(TensorShape(4U), 1, DataType::F32, DataLayout::NHWC);
165  auto dst_info = TensorInfo(TensorShape(4U), 1, DataType::F32, DataLayout::NHWC);
166  const auto fc_info = FullyConnectedLayerInfo{};
167  auto run_conv = [&]()
168  {
169  auto src = create_tensor<Tensor>(src_info);
170  auto weight = create_tensor<Tensor>(weight_info);
171  auto bias = create_tensor<Tensor>(bias_info);
172  auto dst = create_tensor<Tensor>(dst_info);
173  fc->configure(&src, &weight, &bias, &dst, fc_info);
174  src.allocator()->allocate();
175  weight.allocator()->allocate();
176  bias.allocator()->allocate();
177  dst.allocator()->allocate();
178  library->fill_tensor_value(Accessor(src), 1.f);
179  library->fill_tensor_value(Accessor(weight), 2.f);
180  library->fill_tensor_value(Accessor(bias), 3.f);
181  fc->run();
182  return dst;
183  };
184  auto result_0 = run_conv();
185  auto result_1 = run_conv();
186  for(size_t i = 0; i < result_0.info()->tensor_shape().total_size(); ++i)
187  {
188  ARM_COMPUTE_EXPECT(((float *)result_0.buffer())[i] == ((float *)result_1.buffer())[i], framework::LogLevel::ERRORS);
189  }
190 }
191 
192 /** Unit test for @ref cpu::CpuFullyConnected with quantized multipler > 1
193  *
194  * Tests output correctness.
195  */
196 TEST_CASE(Quant8_Signed_Mult_gt_1, framework::DatasetMode::ALL)
197 {
198  auto fc = std::make_unique<cpu::CpuFullyConnected>();
199  const auto src_info = TensorInfo(TensorShape(1U, 3U), 1, DataType::QASYMM8_SIGNED, QuantizationInfo(0.5f, -1));
200  const auto weight_info = TensorInfo(TensorShape(1U), 1, DataType::QASYMM8_SIGNED, QuantizationInfo(0.5, -8));
201  const auto bias_info = TensorInfo(TensorShape(1U), 1, DataType::S32);
202  auto dst_info = TensorInfo(TensorShape(1U, 3U), 1, DataType::QASYMM8_SIGNED, QuantizationInfo(0.1f, 0));
203  const auto fc_info = FullyConnectedLayerInfo{};
204  fc->configure(&src_info, &weight_info, &bias_info, &dst_info, fc_info);
205 
206  // telhs are newly created every call of this lambda function
207  auto src = create_tensor<Tensor>(src_info);
208  auto weight = create_tensor<Tensor>(weight_info);
209  auto bias = create_tensor<Tensor>(bias_info);
210  auto dst = create_tensor<Tensor>(dst_info);
211  src.allocator()->allocate();
212  weight.allocator()->allocate();
213  bias.allocator()->allocate();
214  dst.allocator()->allocate();
215 
216  ITensorPack run_pack{ { TensorType::ACL_SRC_0, &src }, { TensorType::ACL_SRC_1, &weight }, { TensorType::ACL_SRC_2, &bias }, { TensorType::ACL_DST, &dst } };
217  ITensorPack prep_pack{ { TensorType::ACL_SRC_1, &weight }, { TensorType::ACL_SRC_2, &bias } };
218 
219  auto mg = MemoryGroup{};
220  auto ws = manage_workspace<Tensor>(fc->workspace(), mg, run_pack, prep_pack);
221 
222  // Initialize input values
223  const std::vector<int8_t> src_values = { 3, 63, 31 };
224  const std::vector<int8_t> weight_values = { -4 };
225  const std::vector<int32_t> bias_values = { 16 };
226  const std::vector<int32_t> expected = { 80, 127, 127 };
227  library->fill_static_values(Accessor(src), src_values);
228  library->fill_static_values(Accessor(weight), weight_values);
229  library->fill_static_values(Accessor(bias), bias_values);
230 
231  // Run FC layer
232  fc->prepare(prep_pack);
233  fc->run(run_pack);
234 
235  auto dst_ptr = reinterpret_cast<int8_t *>(dst.buffer());
236  for(size_t i = 0; i < dst.info()->tensor_shape().total_size(); ++i)
237  {
238  ARM_COMPUTE_EXPECT(dst_ptr[i] == expected[i], framework::LogLevel::ERRORS);
239  }
240 }
241 
242 // *INDENT-OFF*
243 // clang-format off
245  framework::dataset::make("InputInfo", { TensorInfo(TensorShape(9U, 5U, 7U, 3U), 1, DataType::F32), // Mismatching data types
246  TensorInfo(TensorShape(8U, 4U, 6U, 4U), 1, DataType::F32),
247  TensorInfo(TensorShape(8U, 4U, 6U, 4U), 1, DataType::F32),
248  TensorInfo(TensorShape(9U, 5U, 7U, 3U), 1, DataType::F32), // Invalid weights dimensions
249  TensorInfo(TensorShape(9U, 5U, 7U, 3U), 1, DataType::F32), // Wrongly reshaped weights
250  TensorInfo(TensorShape(8U, 4U, 6U, 4U), 1, DataType::F32),
251  }),
252  framework::dataset::make("WeightsInfo",{ TensorInfo(TensorShape(315U, 271U), 1, DataType::F16),
253  TensorInfo(TensorShape(192U, 192U), 1, DataType::F32),
254  TensorInfo(TensorShape(192U, 192U), 1, DataType::F32),
255  TensorInfo(TensorShape(217U, 315U), 1, DataType::F32),
256  TensorInfo(TensorShape(217U, 315U), 1, DataType::F32),
257  TensorInfo(TensorShape(192U, 192U), 1, DataType::F32),
258  })),
265  })),
272  })),
273  framework::dataset::make("TransposeWeights",{ true, true, false, true, true, true })),
274  framework::dataset::make("ReshapedWeights",{ false, false, false, false, false , false})),
275  framework::dataset::make("Expected", { false, true, true, false, false, true })),
276  input_info, weights_info, bias_info, output_info, transpose_weights, reshaped_weights, expected)
277 {
278  // Create Fully Connected layer info
279  FullyConnectedLayerInfo fc_info;
280  fc_info.transpose_weights = transpose_weights;
281  fc_info.are_weights_reshaped = reshaped_weights;
282 
283  Status status = NEFullyConnectedLayer::validate(&input_info.clone()->set_is_resizable(false), &weights_info.clone()->set_is_resizable(false), &bias_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false), fc_info);
285 }
286 // clang-format on
287 // *INDENT-ON*
288 
289 template <typename T>
290 using NEFullyConnectedLayerFixture = FullyConnectedLayerValidationFixture<Tensor, Accessor, NEFullyConnectedLayer, T>;
291 template <typename T>
292 using NEFullyConnectedLayerMixedDataLayoutFixture = FullyConnectedLayerValidationFixture<Tensor, Accessor, NEFullyConnectedLayer, T, true>;
293 template <typename T>
294 using NEFullyConnectedLayerDynamicWeightsFixture = FullyConnectedWithDynamicWeightsFixture<Tensor, Accessor, NEFullyConnectedLayer, T>;
295 template <typename T>
296 using NEFullyConnectedLayerDynamicBiasFixture = FullyConnectedWithDynamicBiasFixture<Tensor, Accessor, NEFullyConnectedLayer, T>;
297 
298 TEST_SUITE(Float)
299 #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
300 TEST_SUITE(FP16)
302  FullyConnectedParameters),
304  EmptyActivationFunctionDataset))
305 {
306  // Validate output
307  validate(Accessor(_target), _reference, rel_tolerance_f16, tolerance_num_f16, abs_tolerance_f16);
308 }
310  combine(datasets::FullyConnectedLayerWithActivationDataset(),
311  FullyConnectedParameters),
313  ActivationFunctionsDataset))
314 {
315  // Validate output
316  validate(Accessor(_target), _reference, rel_tolerance_f16, tolerance_num_f16, abs_tolerance_f16);
317 }
319  FullyConnectedParameters),
321  EmptyActivationFunctionDataset))
322 {
323  // Validate output
324  validate(Accessor(_target), _reference, rel_tolerance_f16, tolerance_num_f16, abs_tolerance_f16);
325 }
327 #endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
328 
329 TEST_SUITE(FP32)
/** Precommit F32 run: small fully-connected shapes, all transpose/reshape-weights combinations, no fused activation. */
FIXTURE_DATA_TEST_CASE(RunSmall, NEFullyConnectedLayerFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallFullyConnectedLayerDataset(), FullyConnectedParameters),
                                                                                                                 framework::dataset::make("DataType", DataType::F32)),
                                                                                                                 EmptyActivationFunctionDataset))
{
    // Validate output against the reference within the F32 relative/absolute tolerances.
    validate(Accessor(_target), _reference, rel_tolerance_f32, 0, abs_tolerance_f32);
}
338  framework::dataset::make("Input", TensorShape(9U, 5U, 7U)),
339  framework::dataset::make("Weights", TensorShape(315U, 271U))),
340  framework::dataset::make("Biases", TensorShape(271U))),
341  framework::dataset::make("Output", TensorShape(271U))),
342  FullyConnectedParameters),
345 {
346  // Validate output
347  validate(Accessor(_target), _reference, rel_tolerance_f32, 0, abs_tolerance_f32);
348 }
350  combine(datasets::FullyConnectedLayerWithActivationDataset(),
351  FullyConnectedParameters),
353  ActivationFunctionsDataset))
354 {
355  // Validate output
356  validate(Accessor(_target), _reference, rel_tolerance_f32, 0, abs_tolerance_f32);
357 }
358 FIXTURE_DATA_TEST_CASE(RunLarge, NEFullyConnectedLayerFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeFullyConnectedLayerDataset(), FullyConnectedParameters),
360  EmptyActivationFunctionDataset))
361 {
362  // Validate output
363  validate(Accessor(_target), _reference, rel_tolerance_f32, 0, abs_tolerance_f32);
364 }
368 {
369 }
372 
373 template <typename T>
374 using NEFullyConnectedLayerQuantizedFixture = FullyConnectedLayerValidationQuantizedFixture<Tensor, Accessor, NEFullyConnectedLayer, T>;
375 template <typename T>
376 using NEFullyConnectedLayerQuantizedMixedDataLayoutFixture = FullyConnectedLayerValidationQuantizedFixture<Tensor, Accessor, NEFullyConnectedLayer, T, true>;
377 
378 TEST_SUITE(Quantized)
379 TEST_SUITE(QASYMM8)
381  combine(datasets::SmallFullyConnectedLayerDataset(),
382  FullyConnectedParameters),
383  framework::dataset::make("DataType", DataType::QASYMM8)),
385  EmptyActivationFunctionDataset))
386 {
387  // Validate output
388  validate(Accessor(_target), _reference, tolerance_qasymm8);
389 }
392  framework::dataset::make("Input", TensorShape(9U, 5U, 7U)),
393  framework::dataset::make("Weights", TensorShape(315U, 271U))),
394  framework::dataset::make("Biases", TensorShape(271U))),
395  framework::dataset::make("Output", TensorShape(271U))),
396  FullyConnectedParameters),
400 {
401  // Validate output
402  validate(Accessor(_target), _reference, tolerance_qasymm8);
403 }
405  combine(datasets::FullyConnectedLayerWithActivationDataset(),
406  FullyConnectedParameters),
409  ActivationFunctionsQuantizedDataset))
410 {
411  // Validate output
412  validate(Accessor(_target), _reference, tolerance_qasymm8);
413 }
414 
416  combine(datasets::LargeFullyConnectedLayerDataset(),
417  FullyConnectedParameters),
420  EmptyActivationFunctionDataset))
421 {
422  // Validate output
423  validate(Accessor(_target), _reference, tolerance_qasymm8);
424 }
425 
429 {
430 }
434  combine(datasets::SmallFullyConnectedLayerDataset(),
435  FullyConnectedParameters),
436  framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)),
438  EmptyActivationFunctionDataset))
439 {
440  // Validate output
441  validate(Accessor(_target), _reference, tolerance_qasymm8_signed);
442 }
445  framework::dataset::make("Input", TensorShape(9U, 5U, 7U)),
446  framework::dataset::make("Weights", TensorShape(315U, 271U))),
447  framework::dataset::make("Biases", TensorShape(271U))),
448  framework::dataset::make("Output", TensorShape(271U))),
449  FullyConnectedParameters),
453 {
454  // Validate output
455  validate(Accessor(_target), _reference, tolerance_qasymm8);
456 }
458  combine(datasets::FullyConnectedLayerWithActivationDataset(),
459  FullyConnectedParameters),
462  ActivationFunctionsQuantizedDataset))
463 {
464  // Validate output
465  validate(Accessor(_target), _reference, tolerance_qasymm8_signed);
466 }
467 TEST_SUITE_END() // QASYMM8_SIGNED
468 TEST_SUITE_END() // Quantized
469 TEST_SUITE_END() // FullyConnectedLayer
470 TEST_SUITE_END() // NEON
471 } // namespace validation
472 } // namespace test
473 } // namespace arm_compute
Shape of a tensor.
Definition: TensorShape.h:39
constexpr float tolerance_num_f16
F16 Tolerance number.
Definition: cl_gemm.cpp:75
FullyConnectedWithDynamicWeightsFixture< Tensor, Accessor, NEFullyConnectedLayer, T > NEFullyConnectedLayerDynamicWeightsFixture
half_float::half half
16-bit floating point type
Definition: Types.h:48
1 channel, 1 F32 per channel
ARM_COMPUTE_EXPECT(has_error==expected, framework::LogLevel::ERRORS)
FullyConnectedLayerValidationFixture< Tensor, Accessor, NEFullyConnectedLayer, T > NEFullyConnectedLayerFixture
Fully connected layer info.
Definition: Types.h:1688
constexpr AbsoluteTolerance< int8_t > tolerance_qasymm8_signed
Definition: Scale.cpp:518
std::enable_if< is_container< T >::value, ContainerDataset< T > >::type make(std::string name, T &&values)
Helper function to create a ContainerDataset.
Status class.
Definition: Error.h:52
Activation Layer Information class.
Definition: Types.h:1625
SimpleTensor< float > src
Definition: DFT.cpp:155
Copyright (c) 2017-2022 Arm Limited.
1 channel, 1 F16 per channel
1 channel, 1 S32 per channel
Quantization information.
DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(framework::dataset::make("InputInfo", { TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QASYMM8), TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QASYMM8), TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QSYMM16), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QSYMM16), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QSYMM16), }), framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F16), TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QASYMM8), TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QASYMM8), TensorInfo(TensorShape(30U, 11U, 2U), 1, DataType::F32), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QSYMM16, QuantizationInfo(1.f/32768.f, 0)), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QSYMM16, QuantizationInfo(1.f/32768.f, 0)), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QSYMM16, QuantizationInfo(1.f/32768.f, 0)), })), framework::dataset::make("ActivationInfo", { ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::TANH), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::TANH), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LOGISTIC), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::SQRT), })), 
framework::dataset::make("Expected", { false, true, true, true, false, false, true, true, false })), input_info, output_info, act_info, expected)
Accessor implementation for Tensor objects.
Definition: Accessor.h:35
DatasetMode
Possible dataset modes.
Definition: DatasetModes.h:40
std::unique_ptr< AssetsLibrary > library
Definition: main.cpp:76
TEST_SUITE_END() FIXTURE_DATA_TEST_CASE(RunSmall
[CLActivationLayer Test snippet]
quantized, asymmetric fixed-point 8-bit number unsigned
bool are_weights_reshaped
Reshape the weights tensor if false.
Definition: Types.h:1695
FullyConnectedWithDynamicBiasFixture< Tensor, Accessor, NEFullyConnectedLayer, T > NEFullyConnectedLayerDynamicBiasFixture
FullyConnectedLayerValidationFixture< Tensor, Accessor, NEFullyConnectedLayer, T, true > NEFullyConnectedLayerMixedDataLayoutFixture
FullyConnectedLayerValidationQuantizedFixture< Tensor, Accessor, NEFullyConnectedLayer, T > NEFullyConnectedLayerQuantizedFixture
validate(CLAccessor(output_state), expected_output)
TensorInfo src_info(src_shape, 1, data_type)
FullyConnectedLayerValidationQuantizedFixture< Tensor, Accessor, NEFullyConnectedLayer, T, true > NEFullyConnectedLayerQuantizedMixedDataLayoutFixture
FIXTURE_DATA_TEST_CASE(RunSmall, CLAbsLayerFixture< half >, framework::DatasetMode::PRECOMMIT, combine(datasets::SmallShapes(), framework::dataset::make("DataType", DataType::F16)))
Definition: AbsLayer.cpp:50
static Status validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, FullyConnectedLayerInfo fc_info=FullyConnectedLayerInfo())
Static function to check if given info will lead to a valid configuration of NEFullyConnectedLayer.
Num samples, height, width, channels.
bool transpose_weights
Transpose weights if true.
Definition: Types.h:1694
Tensor packing service.
Definition: ITensorPack.h:39
Store the tensor&#39;s metadata.
Definition: TensorInfo.h:43
TEST_CASE(FusedActivation, framework::DatasetMode::ALL)
Validate fused activation expecting the following behaviours:
quantized, asymmetric fixed-point 8-bit number signed
zip(zip(framework::dataset::make("Weights", { TensorInfo(TensorShape(32U, 13U, 2U, 2U), 1, DataType::F32), TensorInfo(TensorShape(32U, 13U, 2U, 2U), 1, DataType::F32), TensorInfo(TensorShape(32U, 13U, 2U, 1U), 1, DataType::F32), }), framework::dataset::make("MVBGInfo",{ TensorInfo(TensorShape(2U), 1, DataType::F32), TensorInfo(TensorShape(2U), 1, DataType::F16), TensorInfo(TensorShape(5U), 1, DataType::F32), })), framework::dataset::make("Expected", { true, false, false}))
TEST_SUITE(QASYMM8_to_F32) FIXTURE_DATA_TEST_CASE(RunSmall
DataType
Available data types.
Definition: Types.h:79
constexpr float abs_tolerance_f32(0.0001f)
F32 Absolute tolerance value for comparing reference&#39;s output against implementation&#39;s output for flo...
combine(datasets::SmallShapes(), framework::dataset::make("DataType", DataType::F32)))
Definition: AbsLayer.cpp:65
const int32_t * bias