Compute Library 22.08 - ConvolutionLayer.cpp
/*
 * Copyright (c) 2017-2021 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/core/Types.h"
#include "arm_compute/core/experimental/PostOps.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "arm_compute/runtime/CL/CLTensor.h"
#include "arm_compute/runtime/CL/functions/CLConvolutionLayer.h"
#include "arm_compute/runtime/CL/functions/CLGEMMConvolutionLayer.h"
#include "tests/CL/CLAccessor.h"
#include "tests/PaddingCalculator.h"
#include "tests/datasets/LargeConvolutionLayerDataset.h"
#include "tests/datasets/SmallConvolutionLayerDataset.h"
#include "tests/datasets/TinyConvolutionLayerDataset.h"
#include "tests/framework/Asserts.h"
#include "tests/framework/Macros.h"
#include "tests/framework/datasets/Datasets.h"
#include "tests/validation/Validation.h"
#include "tests/validation/fixtures/ConvolutionLayerFixture.h"

namespace arm_compute
{
namespace test
{
namespace validation
{
namespace
{
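// Additional hand-picked convolution configurations that the generic Small/Large datasets do not cover.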
class SmallConvolutionLayerDatasetCases final : public datasets::ConvolutionLayerDataset
{
public:
    SmallConvolutionLayerDatasetCases()
    {
        // 1D Kernel
        add_config(TensorShape(1U, 130U, 2000U), TensorShape(1U, 1U, 2000U, 2000U), TensorShape(2000U), TensorShape(1U, 130U, 2000U), PadStrideInfo(1, 1, 0, 0));
    }
};

RelativeTolerance<float>            tolerance_f32(0.1f);                   /**< Tolerance value for comparing reference's output against implementation's output for DataType::F32 */
RelativeTolerance<half_float::half> tolerance_f16(half_float::half(0.2)); /**< Tolerance value for comparing reference's output against implementation's output for DataType::F16 */
constexpr AbsoluteTolerance<float>  tolerance_qasymm8(1);                 /**< Tolerance value for comparing reference's output against implementation's output for quantized data types */
constexpr float                     tolerance_num = 0.07f;                /**< Tolerance number: maximum allowed fraction of mismatching elements */

/** CNN data types */
const auto CNNDataTypes = framework::dataset::make("DataType",
{
    DataType::F16,
    DataType::F32,
    DataType::QASYMM8,
    DataType::QASYMM8_SIGNED,
});

/** Grouped CNN data types */
const auto GroupedCNNDataTypes = framework::dataset::make("DataType",
{
    DataType::F16,
    DataType::F32,
});

const auto ActivationFunctionsDataset = framework::dataset::make("ActivationInfo",
{
    ActivationLayerInfo(),
    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 0.5f),
    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LEAKY_RELU, 0.5f)
});
const auto ActivationFunctionsSmallDataset = framework::dataset::make("ActivationInfo",
{
    ActivationLayerInfo(),
    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 0.5f)
});

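/** Helper to call CLGEMMConvolutionLayer::validate() for the given shapes and post-op list and
 * return whether the configuration is reported as valid (the bias is passed as nullptr).
 */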
bool is_post_op_list_valid_in_gemmconv(const TensorShape &input_shape, const TensorShape &weights_shape, const TensorShape &output_shape, DataType data_type, DataLayout data_layout,
                                       const PadStrideInfo &conv_info, const experimental::PostOpList<ITensorInfo *> &post_ops)
{
    const int idx_width   = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
    const int idx_height  = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
    const int idx_kernels = get_data_layout_dimension_index(data_layout, DataLayoutDimension::BATCHES);

    const auto         dilation   = Size2D(1U, 1U);
    const unsigned int num_groups = 1U;

    TensorInfo input_info(input_shape, 1, data_type, data_layout);
    TensorInfo weights_info(weights_shape, 1, data_type, data_layout);

    TensorInfo output_info(output_shape, 1, data_type, data_layout);

    WeightsInfo w_info(false, weights_info.dimension(idx_width), weights_info.dimension(idx_height), weights_info.dimension(idx_kernels));

    const auto status = CLGEMMConvolutionLayer::validate(&input_info.clone()->set_is_resizable(true),
                                                         &weights_info.clone()->set_is_resizable(true), nullptr, &output_info.clone()->set_is_resizable(true),
                                                         conv_info, w_info, dilation, ActivationLayerInfo(), num_groups, post_ops);
    return bool(status);
}
} // namespace

TEST_SUITE(CL)
TEST_SUITE(ConvolutionLayer)

// *INDENT-OFF*
// clang-format off
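// Check that CLConvolutionLayer::get_convolution_method() selects the expected backend
// (see the "Select GEMM" / "Select WINOGRAD" comments) for each combination of tensor info,
// convolution descriptor, GPU target, dilation and fast-math setting.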
DATA_TEST_CASE(ValidateConvolutionMethod, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(zip(zip(
               framework::dataset::make("InputInfo", { TensorInfo(TensorShape(17U, 31U, 2U), 1, DataType::F32),            // Select GEMM
                                                       TensorInfo(TensorShape(17U, 31U, 2U), 1, DataType::F32),            // Select GEMM
                                                       TensorInfo(TensorShape(23U, 27U, 5U, 4U), 1, DataType::F32),        // Select GEMM
                                                       TensorInfo(TensorShape(23U, 27U, 31U, 4U), 1, DataType::F32),       // Select WINOGRAD
                                                       TensorInfo(TensorShape(3U, 3U, 2U, 1U), 1, DataType::F32),          // Select GEMM
                                                       TensorInfo(TensorShape(33U, 27U, 7U, 4U), 1, DataType::F32),        // Select GEMM
                                                       TensorInfo(TensorShape(17U, 31U, 32U), 1, DataType::F32),           // Select WINOGRAD
                                                       TensorInfo(TensorShape(17U, 31U, 2U), 1, DataType::F32),            // Select GEMM
                                                       TensorInfo(TensorShape(17U, 31U, 2U), 1, DataType::QASYMM8_SIGNED), // Select GEMM
                                                     }),
               framework::dataset::make("WeightsInfo", { TensorInfo(TensorShape(5U, 5U, 2U, 19U), 1, DataType::F32),
                                                         TensorInfo(TensorShape(5U, 5U, 2U, 19U), 1, DataType::F32),
                                                         TensorInfo(TensorShape(3U, 3U, 5U, 21U), 1, DataType::F32),
                                                         TensorInfo(TensorShape(3U, 3U, 31U, 21U), 1, DataType::F32),
                                                         TensorInfo(TensorShape(3U, 3U, 5U, 21U), 1, DataType::F32),
                                                         TensorInfo(TensorShape(5U, 5U, 7U, 16U), 1, DataType::F16),
                                                         TensorInfo(TensorShape(5U, 5U, 32U, 19U), 1, DataType::F32),
                                                         TensorInfo(TensorShape(5U, 5U, 2U, 19U), 1, DataType::F32),
                                                         TensorInfo(TensorShape(5U, 5U, 2U, 19U), 1, DataType::QASYMM8_SIGNED),
                                                       })),
               framework::dataset::make("OutputInfo", { TensorInfo(TensorShape(15U, 15U, 19U), 1, DataType::F32),
                                                        TensorInfo(TensorShape(15U, 15U, 19U), 1, DataType::F32),
                                                        TensorInfo(TensorShape(21U, 25U, 21U, 4U), 1, DataType::F32),
                                                        TensorInfo(TensorShape(21U, 25U, 21U, 4U), 1, DataType::F32),
                                                        TensorInfo(TensorShape(11U, 25U, 21U), 1, DataType::F32),
                                                        TensorInfo(TensorShape(11U, 12U, 16U, 4U), 1, DataType::F32),
                                                        TensorInfo(TensorShape(17U, 31U, 19U), 1, DataType::F32),
                                                        TensorInfo(TensorShape(17U, 31U, 19U), 1, DataType::F32),
                                                        TensorInfo(TensorShape(17U, 31U, 19U), 1, DataType::QASYMM8_SIGNED),
                                                      })),
               framework::dataset::make("ConvInfo", { PadStrideInfo(1, 2, 1, 1),
                                                      PadStrideInfo(1, 2, 1, 1),
                                                      PadStrideInfo(1, 1, 0, 0),
                                                      PadStrideInfo(1, 1, 0, 0),
                                                      PadStrideInfo(2, 1, 0, 0),
                                                      PadStrideInfo(3, 2, 1, 0),
                                                      PadStrideInfo(1, 1, 2, 2),
                                                      PadStrideInfo(1, 1, 2, 2),
                                                      PadStrideInfo(1, 1, 2, 2),
                                                    })),
               framework::dataset::make("GpuTarget", { GPUTarget::BIFROST,
                                                       GPUTarget::MIDGARD,
                                                       GPUTarget::MIDGARD,
                                                       GPUTarget::BIFROST,
                                                       GPUTarget::MIDGARD,
                                                       GPUTarget::BIFROST,
                                                       GPUTarget::BIFROST,
                                                       GPUTarget::BIFROST,
                                                       GPUTarget::BIFROST,
                                                     })),
               framework::dataset::make("Dilation", { Size2D(1U, 1U),
                                                      Size2D(1U, 1U),
                                                      Size2D(1U, 1U),
                                                      Size2D(1U, 1U),
                                                      Size2D(1U, 1U),
                                                      Size2D(1U, 1U),
                                                      Size2D(1U, 1U),
                                                      Size2D(2U, 1U),
                                                      Size2D(2U, 1U),
                                                    })),
               framework::dataset::make("EnableFastMath", { false, false, false, false, false, false, true, true, true })),
               framework::dataset::make("Expected", { ConvolutionMethod::GEMM,
                                                      ConvolutionMethod::GEMM,
                                                      ConvolutionMethod::GEMM,
                                                      ConvolutionMethod::WINOGRAD,
                                                      ConvolutionMethod::GEMM,
                                                      ConvolutionMethod::GEMM,
                                                      ConvolutionMethod::WINOGRAD,
                                                      ConvolutionMethod::GEMM,
                                                      ConvolutionMethod::GEMM,
                                                    })),
               input_info, weights_info, output_info, conv_info, gpu_target, dilation, enable_fast_math, expected)
{
    ConvolutionMethod selected_method = CLConvolutionLayer::get_convolution_method(&input_info.clone()->set_is_resizable(true),
                                                                                   &weights_info.clone()->set_is_resizable(true),
                                                                                   &output_info.clone()->set_is_resizable(true), conv_info,
                                                                                   WeightsInfo(),
                                                                                   ActivationLayerInfo(),
                                                                                   gpu_target,
                                                                                   dilation,
                                                                                   enable_fast_math);
    ARM_COMPUTE_EXPECT(selected_method == expected, framework::LogLevel::ERRORS);
}

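// Fused post-ops are only supported by the GEMM convolution path: the selected method must
// match the expectation, and validate() must reject a non-empty post-op list for Winograd,
// Direct and FFT.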
DATA_TEST_CASE(ValidatePostOpSupportInConvolutionMethod, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(zip(
               framework::dataset::make("InputInfo", { TensorInfo(TensorShape(2U, 17U, 31U), 1, DataType::F32, DataLayout::NHWC),  // Select GEMM
                                                       TensorInfo(TensorShape(17U, 31U, 32U), 1, DataType::F32, DataLayout::NCHW), // Select WINOGRAD
                                                       TensorInfo(TensorShape(27U, 27U, 48U), 1, DataType::F32, DataLayout::NCHW), // Select Direct
                                                       TensorInfo(TensorShape(27U, 27U, 48U), 1, DataType::F32, DataLayout::NCHW), // Select FFT
                                                     }),
               framework::dataset::make("WeightsInfo", { TensorInfo(TensorShape(2U, 1U, 1U, 19U), 1, DataType::F32, DataLayout::NHWC),
                                                         TensorInfo(TensorShape(5U, 5U, 32U, 19U), 1, DataType::F32, DataLayout::NCHW),
                                                         TensorInfo(TensorShape(5U, 5U, 48U, 128U), 1, DataType::F32, DataLayout::NCHW),
                                                         TensorInfo(TensorShape(11U, 11U, 48U, 24U), 1, DataType::F32, DataLayout::NCHW),
                                                       })),
               framework::dataset::make("OutputInfo", { TensorInfo(TensorShape(19U, 17U, 31U), 1, DataType::F32, DataLayout::NHWC),
                                                        TensorInfo(TensorShape(17U, 31U, 19U), 1, DataType::F32, DataLayout::NCHW),
                                                        TensorInfo(TensorShape(27U, 27U, 128U), 1, DataType::F32, DataLayout::NCHW),
                                                        TensorInfo(TensorShape(27U, 27U, 24U), 1, DataType::F32, DataLayout::NCHW),
                                                      })),
               framework::dataset::make("ConvInfo", { PadStrideInfo(1U, 1U, 0U, 0U),
                                                      PadStrideInfo(1U, 1U, 2U, 2U),
                                                      PadStrideInfo(1U, 1U, 2U, 2U),
                                                      PadStrideInfo(1U, 1U, 5U, 5U),
                                                    })),
               framework::dataset::make("EnableFastMath", { false, true, false, false })),
               framework::dataset::make("ExpectedMethod", { ConvolutionMethod::GEMM,
                                                            ConvolutionMethod::WINOGRAD,
                                                            ConvolutionMethod::DIRECT,
                                                            ConvolutionMethod::FFT,
                                                          })),
               framework::dataset::make("PostOpSupported", { true, false, false, false
                                                           })),
               input_info, weights_info, output_info, conv_info, enable_fast_math, expected_method, post_op_supported)
{
    const int idx_width   = get_data_layout_dimension_index(input_info.data_layout(), DataLayoutDimension::WIDTH);
    const int idx_height  = get_data_layout_dimension_index(input_info.data_layout(), DataLayoutDimension::HEIGHT);
    const int idx_kernels = get_data_layout_dimension_index(input_info.data_layout(), DataLayoutDimension::BATCHES);

    const auto         dilation   = Size2D(1U, 1U);
    const unsigned int num_groups = 1U;

    WeightsInfo w_info(false, weights_info.dimension(idx_width), weights_info.dimension(idx_height), weights_info.dimension(idx_kernels));

    // A single fused activation post op to probe support
    experimental::PostOpList<ITensorInfo *> post_ops{};
    post_ops.push_back_op<experimental::PostOpAct<ITensorInfo *>>(ActivationLayerInfo{ ActivationLayerInfo::ActivationFunction::LINEAR });

    ConvolutionMethod actual_method = CLConvolutionLayer::get_convolution_method(&input_info.clone()->set_is_resizable(true),
                                                                                 &weights_info.clone()->set_is_resizable(true),
                                                                                 &output_info.clone()->set_is_resizable(true), conv_info,
                                                                                 WeightsInfo(),
                                                                                 ActivationLayerInfo(),
                                                                                 GPUTarget::BIFROST,
                                                                                 dilation,
                                                                                 enable_fast_math);
    ARM_COMPUTE_EXPECT(actual_method == expected_method, framework::LogLevel::ERRORS);
    const auto is_valid = CLConvolutionLayer::validate(&input_info.clone()->set_is_resizable(true),
                                                       &weights_info.clone()->set_is_resizable(true),
                                                       nullptr,
                                                       &output_info.clone()->set_is_resizable(true),
                                                       conv_info,
                                                       w_info,
                                                       dilation,
                                                       ActivationLayerInfo(),
                                                       enable_fast_math,
                                                       num_groups,
                                                       post_ops);
    ARM_COMPUTE_EXPECT(bool(is_valid) == post_op_supported, framework::LogLevel::ERRORS);
}
// clang-format on
// *INDENT-ON*
TEST_SUITE_END() // ConvolutionLayer

TEST_SUITE(GEMMConvolutionLayer)
template <typename T>
using CLGEMMConvolutionLayerFixture = ConvolutionValidationFixture<CLTensor, CLAccessor, CLGEMMConvolutionLayer, T>;
template <typename T>
using CLGEMMConvolutionLayerMixedDataLayoutFixture = ConvolutionValidationFixture<CLTensor, CLAccessor, CLGEMMConvolutionLayer, T, true>;
template <typename T>
using CLConvolutionValidationWithPaddingFixture = ConvolutionValidationWithPaddingFixture<CLTensor, CLAccessor, CLGEMMConvolutionLayer, T>;

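// The Invalid/Valid cases below pin down the constraints on fused post-op lists in
// CLGEMMConvolutionLayer: floating-point data only, NHWC only, and only 1x1 kernels with unit stride.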
TEST_SUITE(ValidateFusedPostOpsConfigs)
TEST_SUITE(Invalid)
TEST_CASE(UnsupportedPostOpSequence, framework::DatasetMode::ALL)
{
    const auto data_type     = DataType::F32;
    const auto data_layout   = DataLayout::NHWC;
    const auto conv_info     = PadStrideInfo(1, 1, 0, 0);
    const auto input_shape   = TensorShape(16U, 14U, 12U, 2U);
    const auto weights_shape = TensorShape(16U, 1U, 1U, 24U);

    const auto output_shape = misc::shape_calculator::compute_deep_convolution_shape(input_shape, data_layout, weights_shape, conv_info);

    const TensorShape post_op_arg0_shape(output_shape);
    TensorInfo        post_op_arg_info(post_op_arg0_shape, 1, data_type);
    auto              post_op_arg1_info = post_op_arg_info.clone();

    // Unsupported sequence of post ops
    experimental::PostOpList<ITensorInfo *> post_ops{};
    post_ops.push_back_op<experimental::PostOpEltwiseAdd<ITensorInfo *>>(
        &post_op_arg_info,
        1,
        ConvertPolicy::SATURATE);
    post_ops.push_back_op<experimental::PostOpEltwiseAdd<ITensorInfo *>>(
        post_op_arg1_info.get(),
        0,
        ConvertPolicy::SATURATE);

    ARM_COMPUTE_EXPECT(is_post_op_list_valid_in_gemmconv(input_shape, weights_shape, output_shape, data_type, data_layout, conv_info, post_ops) == false, framework::LogLevel::ERRORS);
}
TEST_CASE(OnlyNHWCIsSupported, framework::DatasetMode::ALL)
{
    const auto data_type     = DataType::F32;
    const auto data_layout   = DataLayout::NCHW;
    const auto conv_info     = PadStrideInfo(1, 1, 0, 0);
    const auto input_shape   = TensorShape(14U, 12U, 16U, 2U);
    const auto weights_shape = TensorShape(1U, 1U, 16U, 24U);

    const auto output_shape = misc::shape_calculator::compute_deep_convolution_shape(input_shape, data_layout, weights_shape, conv_info);

    const TensorShape post_op_arg0_shape(output_shape);
    TensorInfo        post_op_arg_info(post_op_arg0_shape, 1, data_type);

    experimental::PostOpList<ITensorInfo *> post_ops{};
    post_ops.push_back_op<experimental::PostOpEltwiseAdd<ITensorInfo *>>(
        &post_op_arg_info,
        1,
        ConvertPolicy::SATURATE);

    ARM_COMPUTE_EXPECT(is_post_op_list_valid_in_gemmconv(input_shape, weights_shape, output_shape, data_type, data_layout, conv_info, post_ops) == false, framework::LogLevel::ERRORS);
}
TEST_CASE(OnlyFloatingTypeIsSupported, framework::DatasetMode::ALL)
{
    const auto data_type     = DataType::QASYMM8;
    const auto data_layout   = DataLayout::NHWC;
    const auto conv_info     = PadStrideInfo(1, 1, 0, 0);
    const auto input_shape   = TensorShape(16U, 14U, 12U, 2U);
    const auto weights_shape = TensorShape(16U, 1U, 1U, 24U);

    const auto output_shape = misc::shape_calculator::compute_deep_convolution_shape(input_shape, data_layout, weights_shape, conv_info);

    const TensorShape post_op_arg0_shape(output_shape);
    TensorInfo        post_op_arg_info(post_op_arg0_shape, 1, data_type);

    experimental::PostOpList<ITensorInfo *> post_ops{};
    post_ops.push_back_op<experimental::PostOpEltwiseAdd<ITensorInfo *>>(
        &post_op_arg_info,
        1,
        ConvertPolicy::SATURATE);

    ARM_COMPUTE_EXPECT(is_post_op_list_valid_in_gemmconv(input_shape, weights_shape, output_shape, data_type, data_layout, conv_info, post_ops) == false, framework::LogLevel::ERRORS);
}
TEST_CASE(OnlyConv1x1Stride1IsSupported_UnsupportedKernelSize, framework::DatasetMode::ALL)
{
    const auto data_type     = DataType::F32;
    const auto data_layout   = DataLayout::NHWC;
    const auto conv_info     = PadStrideInfo(1, 1, 0, 0);
    const auto input_shape   = TensorShape(16U, 14U, 12U, 2U);
    const auto weights_shape = TensorShape(16U, 3U, 3U, 24U);

    const auto output_shape = misc::shape_calculator::compute_deep_convolution_shape(input_shape, data_layout, weights_shape, conv_info);

    const TensorShape post_op_arg0_shape(output_shape);
    TensorInfo        post_op_arg_info(post_op_arg0_shape, 1, data_type);

    experimental::PostOpList<ITensorInfo *> post_ops{};
    post_ops.push_back_op<experimental::PostOpEltwiseAdd<ITensorInfo *>>(
        &post_op_arg_info,
        1,
        ConvertPolicy::SATURATE);

    ARM_COMPUTE_EXPECT(is_post_op_list_valid_in_gemmconv(input_shape, weights_shape, output_shape, data_type, data_layout, conv_info, post_ops) == false, framework::LogLevel::ERRORS);
}
TEST_CASE(OnlyConv1x1Stride1IsSupported_UnsupportedStride, framework::DatasetMode::ALL)
{
    const auto data_type     = DataType::F32;
    const auto data_layout   = DataLayout::NHWC;
    const auto conv_info     = PadStrideInfo(3, 3, 0, 0);
    const auto input_shape   = TensorShape(16U, 14U, 12U, 2U);
    const auto weights_shape = TensorShape(16U, 1U, 1U, 24U);

    const auto output_shape = misc::shape_calculator::compute_deep_convolution_shape(input_shape, data_layout, weights_shape, conv_info);

    const TensorShape post_op_arg0_shape(output_shape);
    TensorInfo        post_op_arg_info(post_op_arg0_shape, 1, data_type);

    experimental::PostOpList<ITensorInfo *> post_ops{};
    post_ops.push_back_op<experimental::PostOpEltwiseAdd<ITensorInfo *>>(
        &post_op_arg_info,
        1,
        ConvertPolicy::SATURATE);

    ARM_COMPUTE_EXPECT(is_post_op_list_valid_in_gemmconv(input_shape, weights_shape, output_shape, data_type, data_layout, conv_info, post_ops) == false, framework::LogLevel::ERRORS);
}
TEST_SUITE_END() // Invalid
TEST_SUITE(Valid)
TEST_CASE(EmptyPostOpList, framework::DatasetMode::ALL)
{
    const auto data_type     = DataType::F32;
    const auto data_layout   = DataLayout::NHWC;
    const auto conv_info     = PadStrideInfo(1, 1, 0, 0);
    const auto input_shape   = TensorShape(16U, 14U, 12U, 2U);
    const auto weights_shape = TensorShape(16U, 1U, 1U, 24U);

    const auto output_shape = misc::shape_calculator::compute_deep_convolution_shape(input_shape, data_layout, weights_shape, conv_info);

    experimental::PostOpList<ITensorInfo *> post_ops{};

    ARM_COMPUTE_EXPECT(is_post_op_list_valid_in_gemmconv(input_shape, weights_shape, output_shape, data_type, data_layout, conv_info, post_ops) == true, framework::LogLevel::ERRORS);
}
TEST_CASE(SupportedPostOps, framework::DatasetMode::ALL)
{
    const auto data_type     = DataType::F32;
    const auto data_layout   = DataLayout::NHWC;
    const auto conv_info     = PadStrideInfo(1, 1, 0, 0);
    const auto input_shape   = TensorShape(16U, 14U, 12U, 2U);
    const auto weights_shape = TensorShape(16U, 1U, 1U, 24U);

    const auto output_shape = misc::shape_calculator::compute_deep_convolution_shape(input_shape, data_layout, weights_shape, conv_info);

    TensorShape post_op_arg0_shape(output_shape);
    post_op_arg0_shape[1] = 1; // Broadcast in "Y" (second) dimension
    TensorInfo post_op_arg_info(post_op_arg0_shape, 1, data_type);

    experimental::PostOpList<ITensorInfo *> post_ops{};
    post_ops.push_back_op<experimental::PostOpEltwiseAdd<ITensorInfo *>>(
        &post_op_arg_info,
        1,
        ConvertPolicy::SATURATE);

    ARM_COMPUTE_EXPECT(is_post_op_list_valid_in_gemmconv(input_shape, weights_shape, output_shape, data_type, data_layout, conv_info, post_ops) == true, framework::LogLevel::ERRORS);
}
TEST_SUITE_END() // Valid
TEST_SUITE_END() // ValidateFusedPostOpsConfigs
TEST_SUITE(Float)
TEST_SUITE(FP16)

FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMConvolutionLayerFixture<half>, framework::DatasetMode::ALL,
                       combine(combine(combine(combine(datasets::SmallConvolutionLayerDataset(),
                                                       framework::dataset::make("ReshapeWeights", { true })),
                                               framework::dataset::make("DataType", DataType::F16)),
                                       framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
                               ActivationFunctionsSmallDataset))
{
    // Validate output
    validate(CLAccessor(_target), _reference, tolerance_f16, tolerance_num);
}
TEST_SUITE_END() // FP16

TEST_SUITE(FP32)

FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMConvolutionLayerFixture<float>, framework::DatasetMode::ALL,
                       combine(combine(combine(combine(datasets::SmallConvolutionLayerDataset(),
                                                       framework::dataset::make("ReshapeWeights", { true })),
                                               framework::dataset::make("DataType", DataType::F32)),
                                       framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
                               ActivationFunctionsSmallDataset))
{
    // Validate output
    validate(CLAccessor(_target), _reference, tolerance_f32);
}
FIXTURE_DATA_TEST_CASE(RunMixedDataLayout, CLGEMMConvolutionLayerMixedDataLayoutFixture<float>, framework::DatasetMode::ALL,
                       combine(combine(combine(combine(combine(combine(combine(combine(combine(
                                   framework::dataset::make("Input", TensorShape(23U, 27U, 5U)),
                                   framework::dataset::make("Weights", TensorShape(3U, 3U, 5U, 2U))),
                                   framework::dataset::make("Bias", TensorShape(2U))),
                                   framework::dataset::make("Output", TensorShape(11U, 25U, 2U))),
                                   framework::dataset::make("PadStrideInfo", PadStrideInfo(2, 1, 0, 0))),
                                   framework::dataset::make("Dilation", Size2D(1, 1))),
                                   framework::dataset::make("ReshapeWeights", { true })),
                                   framework::dataset::make("DataType", DataType::F32)),
                                   framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
                                   ActivationFunctionsSmallDataset))
{
    // Validate output
    validate(CLAccessor(_target), _reference, tolerance_f32);
}
FIXTURE_DATA_TEST_CASE(RunSmallWithPadding, CLConvolutionValidationWithPaddingFixture<float>, framework::DatasetMode::ALL,
                       combine(combine(combine(combine(combine(datasets::SmallConvolutionLayerPrePaddingDataset(),
                                                               framework::dataset::make("ReshapeWeights", { true })),
                                                       framework::dataset::make("DataType", DataType::F32)),
                                               framework::dataset::make("DataLayout", { DataLayout::NHWC })),
                                       framework::dataset::make("ActivationInfo", { ActivationLayerInfo() })),
                               framework::dataset::make("PrePadLayer", { PaddingList({ { 1, 1 }, { 1, 1 } }) })))
{
    // Validate output
    validate(CLAccessor(_target), _reference, tolerance_f32);
}

TEST_SUITE_END() // FP32
TEST_SUITE_END() // Float

template <typename T>
using CLGEMMConvolutionLayerQuantizedFixture = ConvolutionValidationQuantizedFixture<CLTensor, CLAccessor, CLGEMMConvolutionLayer, T>;
template <typename T>
using CLGEMMConvolutionLayerQuantizedMixedDataLayoutFixture = ConvolutionValidationQuantizedFixture<CLTensor, CLAccessor, CLGEMMConvolutionLayer, T, true>;
template <typename T>
using CLGEMMConvolutionLayerQuantizedPerChannelFixture = ConvolutionValidationQuantizedPerChannelFixture<CLTensor, CLAccessor, CLGEMMConvolutionLayer, T, int8_t>;
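// In the per-channel fixture T selects the input/output data type, while the weights are
// always int8_t (used with QSYMM8_PER_CHANNEL below).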

const auto QuantizedActivationFunctionsDataset = framework::dataset::make("ActivationInfo",
{
    ActivationLayerInfo(),
    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 6.f)
});
const auto QuantizedActivationFunctionsSmallDataset = framework::dataset::make("ActivationInfo",
{
    ActivationLayerInfo(),
    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 6.f)
});

TEST_SUITE(Quantized)

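// Quantization parameters (scale, offset) exercised by every quantized fixture below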
const auto QuantizationData = framework::dataset::make("QuantizationInfo",
{
    QuantizationInfo(0.5f, 10),
    QuantizationInfo(0.3f, 3),
    QuantizationInfo(1.1f, 10),
});
TEST_SUITE(QASYMM8)

FIXTURE_DATA_TEST_CASE(RunSmallCases, CLGEMMConvolutionLayerQuantizedFixture<uint8_t>, framework::DatasetMode::ALL,
                       combine(combine(combine(combine(combine(SmallConvolutionLayerDatasetCases(),
                                                               framework::dataset::make("ReshapeWeights", { true })),
                                                       framework::dataset::make("DataType", DataType::QASYMM8)),
                                               framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
                                       QuantizationData),
                               QuantizedActivationFunctionsSmallDataset))
{
    // Validate output
    validate(CLAccessor(_target), _reference, tolerance_qasymm8);
}

FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMConvolutionLayerQuantizedFixture<uint8_t>, framework::DatasetMode::ALL,
                       combine(combine(combine(combine(combine(datasets::SmallConvolutionLayerDataset(),
                                                               framework::dataset::make("ReshapeWeights", { true })),
                                                       framework::dataset::make("DataType", DataType::QASYMM8)),
                                               framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
                                       QuantizationData),
                               QuantizedActivationFunctionsSmallDataset))
{
    // Validate output
    validate(CLAccessor(_target), _reference, tolerance_qasymm8);
}
FIXTURE_DATA_TEST_CASE(RunMixedDataLayout, CLGEMMConvolutionLayerQuantizedMixedDataLayoutFixture<uint8_t>, framework::DatasetMode::ALL,
                       combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
                                   framework::dataset::make("Input", TensorShape(23U, 27U, 5U)),
                                   framework::dataset::make("Weights", TensorShape(3U, 3U, 5U, 2U))),
                                   framework::dataset::make("Bias", TensorShape(2U))),
                                   framework::dataset::make("Output", TensorShape(11U, 25U, 2U))),
                                   framework::dataset::make("PadStrideInfo", PadStrideInfo(2, 1, 0, 0))),
                                   framework::dataset::make("Dilation", Size2D(1, 1))),
                                   framework::dataset::make("ReshapeWeights", { true })),
                                   framework::dataset::make("DataType", DataType::QASYMM8)),
                                   framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
                                   QuantizationData),
                                   QuantizedActivationFunctionsSmallDataset))
{
    // Validate output
    validate(CLAccessor(_target), _reference, tolerance_qasymm8);
}
TEST_SUITE_END() // QASYMM8
TEST_SUITE(QASYMM8_SIGNED)
FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMConvolutionLayerQuantizedFixture<int8_t>, framework::DatasetMode::ALL,
                       combine(combine(combine(combine(combine(datasets::SmallConvolutionLayerDataset(),
                                                               framework::dataset::make("ReshapeWeights", { true })),
                                                       framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)),
                                               framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
                                       QuantizationData),
                               QuantizedActivationFunctionsSmallDataset))
{
    // Validate output
    validate(CLAccessor(_target), _reference, tolerance_qasymm8);
}
FIXTURE_DATA_TEST_CASE(RunMixedDataLayout, CLGEMMConvolutionLayerQuantizedMixedDataLayoutFixture<int8_t>, framework::DatasetMode::ALL,
                       combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
                                   framework::dataset::make("Input", TensorShape(23U, 27U, 5U)),
                                   framework::dataset::make("Weights", TensorShape(3U, 3U, 5U, 2U))),
                                   framework::dataset::make("Bias", TensorShape(2U))),
                                   framework::dataset::make("Output", TensorShape(11U, 25U, 2U))),
                                   framework::dataset::make("PadStrideInfo", PadStrideInfo(2, 1, 0, 0))),
                                   framework::dataset::make("Dilation", Size2D(1, 1))),
                                   framework::dataset::make("ReshapeWeights", { true })),
                                   framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)),
                                   framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
                                   QuantizationData),
                                   QuantizedActivationFunctionsSmallDataset))
{
    // Validate output
    validate(CLAccessor(_target), _reference, tolerance_qasymm8);
}
TEST_SUITE_END() // QASYMM8_SIGNED
TEST_SUITE(QSYMM8_PER_CHANNEL)

FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMConvolutionLayerQuantizedPerChannelFixture<uint8_t>, framework::DatasetMode::ALL,
                       combine(combine(combine(combine(combine(combine(datasets::SmallConvolutionLayerDataset(),
                                                                       framework::dataset::make("ReshapeWeights", { true })),
                                                               framework::dataset::make("DataType", { DataType::QASYMM8 })),
                                                       framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
                                               QuantizationData),
                                       QuantizedActivationFunctionsSmallDataset),
                               framework::dataset::make("WeightsDataType", { DataType::QSYMM8_PER_CHANNEL })))
{
    // Validate output
    validate(CLAccessor(_target), _reference, tolerance_qasymm8);
}

FIXTURE_DATA_TEST_CASE(RunSmallSigned, CLGEMMConvolutionLayerQuantizedPerChannelFixture<int8_t>, framework::DatasetMode::ALL,
                       combine(combine(combine(combine(combine(combine(datasets::SmallConvolutionLayerDataset(),
                                                                       framework::dataset::make("ReshapeWeights", { true })),
                                                               framework::dataset::make("DataType", { DataType::QASYMM8_SIGNED })),
                                                       framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
                                               QuantizationData),
                                       QuantizedActivationFunctionsSmallDataset),
                               framework::dataset::make("WeightsDataType", { DataType::QSYMM8_PER_CHANNEL })))
{
    // Validate output
    validate(CLAccessor(_target), _reference, tolerance_qasymm8);
}
TEST_SUITE_END() // QSYMM8_PER_CHANNEL
TEST_SUITE_END() // Quantized

TEST_SUITE_END() // GEMMConvolutionLayer

template <typename T>
using CLGEMMGroupedConvolutionLayerFixture = ConvolutionValidationFixture<CLTensor, CLAccessor, CLGEMMConvolutionLayer, T>;

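// Grouped convolution through CLGEMMConvolutionLayer; only the NCHW layout is exercised here.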
TEST_SUITE(GroupedGEMMConvolutionLayer)

TEST_SUITE(Float)
TEST_SUITE(FP32)

FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMGroupedConvolutionLayerFixture<float>, framework::DatasetMode::ALL,
                       combine(combine(combine(combine(datasets::SmallGroupedConvolutionLayerDataset(),
                                                       framework::dataset::make("ReshapeWeights", { true })),
                                               framework::dataset::make("DataType", DataType::F32)),
                                       framework::dataset::make("DataLayout", { DataLayout::NCHW })),
                               ActivationFunctionsSmallDataset))
{
    // Validate output
    validate(CLAccessor(_target), _reference, tolerance_f32, tolerance_num);
}

FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMGroupedConvolutionLayerFixture<float>, framework::DatasetMode::NIGHTLY,
                       combine(combine(combine(combine(datasets::LargeGroupedConvolutionLayerDataset(),
                                                       framework::dataset::make("ReshapeWeights", { true })),
                                               framework::dataset::make("DataType", DataType::F32)),
                                       framework::dataset::make("DataLayout", { DataLayout::NCHW })),
                               ActivationFunctionsDataset))
{
    // Validate output
    validate(CLAccessor(_target), _reference, tolerance_f32, tolerance_num);
}
TEST_SUITE_END() // FP32

TEST_SUITE(FP16)

FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMGroupedConvolutionLayerFixture<half>, framework::DatasetMode::ALL,
                       combine(combine(combine(combine(datasets::SmallGroupedConvolutionLayerDataset(),
                                                       framework::dataset::make("ReshapeWeights", { true })),
                                               framework::dataset::make("DataType", DataType::F16)),
                                       framework::dataset::make("DataLayout", { DataLayout::NCHW })),
                               ActivationFunctionsSmallDataset))
{
    // Validate output
    validate(CLAccessor(_target), _reference, tolerance_f32, tolerance_num);
}

FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMGroupedConvolutionLayerFixture<half>, framework::DatasetMode::NIGHTLY,
                       combine(combine(combine(combine(datasets::LargeGroupedConvolutionLayerDataset(),
                                                       framework::dataset::make("ReshapeWeights", { true })),
                                               framework::dataset::make("DataType", DataType::F16)),
                                       framework::dataset::make("DataLayout", { DataLayout::NCHW })),
                               ActivationFunctionsDataset))
{
    // Validate output
    validate(CLAccessor(_target), _reference, tolerance_f32, tolerance_num);
}
TEST_SUITE_END() // FP16
TEST_SUITE_END() // Float

TEST_SUITE_END() // GroupedGEMMConvolutionLayer
TEST_SUITE_END() // CL
} // namespace validation
} // namespace test
} // namespace arm_compute