37 #include "tests/datasets/LargeConvolutionLayerDataset.h" 38 #include "tests/datasets/SmallConvolutionLayerDataset.h" 39 #include "tests/datasets/TinyConvolutionLayerDataset.h" 44 #include "tests/validation/fixtures/ConvolutionLayerFixture.h" 45 #include "tests/validation/fixtures/WinogradConvolutionLayerFixture.h" 64 func.configure(src, weights, bias, dst, conv_info);
74 #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC 79 #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC 82 constexpr
float tolerance_num = 0.07f;
89 #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC 146 &weights_info.clone()->set_is_resizable(
true),
155 template <
typename T>
156 using NEWinogradConvolutionLayerFixture = WinogradConvolutionLayerFastMathValidationFixture<Tensor, Accessor, NEWinogradConvolutionLayer, T>;
157 template <
typename T>
160 template <
typename T>
172 auto winograd = std::make_unique<cpu::CpuWinogradConv2d>();
179 winograd->configure(&src_info, &b_info, &w_info, &dst_info, pad_info);
182 auto a = create_tensor<Tensor>(
src_info);
183 auto b = create_tensor<Tensor>(b_info);
184 auto c = create_tensor<Tensor>(w_info);
185 a.allocator()->allocate();
186 b.allocator()->allocate();
187 c.allocator()->allocate();
193 auto ws = manage_workspace<Tensor>(winograd->workspace(), mg, run_pack, prep_pack);
194 auto run_conv = [&]() ->
Tensor 196 auto dst = create_tensor<Tensor>(dst_info);
197 dst.allocator()->allocate();
205 winograd->prepare(prep_pack);
206 winograd->run(run_pack);
210 auto result_0 = run_conv();
211 auto result_1 = run_conv();
213 for(
size_t i = 0; i < result_0.info()->tensor_shape().total_size(); ++i)
228 auto gemm = std::make_unique<NEWinogradConvolutionLayer>();
235 auto run_conv = [&]()
238 auto w = create_tensor<Tensor>(w_info);
239 auto b = create_tensor<Tensor>(b_info);
240 auto dst = create_tensor<Tensor>(dst_info);
242 gemm->configure(&
src, &b, &w, &dst, pad_info);
244 src.allocator()->allocate();
245 b.allocator()->allocate();
246 w.allocator()->allocate();
247 dst.allocator()->allocate();
256 auto result_0 = run_conv();
257 auto result_1 = run_conv();
259 for(
size_t i = 0; i < result_0.info()->tensor_shape().total_size(); ++i)
271 ActivationFunctionsDataset),
286 ActivationFunctionsDataset),
295 ActivationFunctionsDataset),
308 ActivationFunctionsDataset),
317 ActivationFunctionsDataset),
330 ActivationFunctionsDataset),
339 ActivationFunctionsDataset),
352 ActivationFunctionsDataset),
361 ActivationFunctionsDataset),
374 ActivationFunctionsDataset),
384 ActivationFunctionsDataset),
396 ActivationFunctionsDataset),
406 ActivationFunctionsDataset),
418 ActivationFunctionsDataset),
428 ActivationFunctionsDataset),
442 ActivationFunctionsDataset),
452 ActivationFunctionsDataset),
464 datasets::SmallWinogradConvolutionLayer5x5Dataset()),
466 ActivationFunctionsDataset),
476 #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC 484 ActivationFunctionsDataset),
495 ActivationFunctionsDataset),
508 template <
typename T>
509 using NEGEMMConvolutionLayerFixture = ConvolutionValidationFixture<Tensor, Accessor, NEConvolutionLayer, T>;
510 template <
typename T>
522 auto conv = std::make_unique<cpu::CpuGemmConv2d>();
529 conv->configure(&src_info, &weight_info, &bias_info, &dst_info,
conv_info, weights_info);
533 auto weight = create_tensor<Tensor>(weight_info);
534 auto bias = create_tensor<Tensor>(bias_info);
535 src.allocator()->allocate();
536 weight.allocator()->allocate();
537 bias.allocator()->allocate();
543 auto ws = manage_workspace<Tensor>(conv->workspace(), mg, run_pack, prep_pack);
545 auto run_conv = [&]() ->
Tensor 547 auto dst = create_tensor<Tensor>(dst_info);
548 dst.allocator()->allocate();
555 conv->prepare(prep_pack);
559 auto result_0 = run_conv();
560 auto result_1 = run_conv();
561 for(
size_t i = 0; i < result_0.info()->tensor_shape().total_size(); ++i)
576 auto conv = std::make_unique<NEGEMMConvolutionLayer>();
583 auto run_conv = [&]()
586 auto weight = create_tensor<Tensor>(weight_info);
587 auto bias = create_tensor<Tensor>(bias_info);
588 auto dst = create_tensor<Tensor>(dst_info);
589 conv->configure(&
src, &weight, &bias, &dst,
conv_info, weights_info);
590 src.allocator()->allocate();
591 weight.allocator()->allocate();
592 bias.allocator()->allocate();
593 dst.allocator()->allocate();
600 auto result_0 = run_conv();
601 auto result_1 = run_conv();
602 for(
size_t i = 0; i < result_0.info()->tensor_shape().total_size(); ++i)
609 #if defined(__ARM_FEATURE_BF16_VECTOR_ARITHMETIC) || defined(ARM_COMPUTE_FORCE_BF16) 615 ActivationFunctionsDataset))
623 #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC 629 ActivationFunctionsDataset))
632 validate(
Accessor(_target), _reference, rel_tolerance_f16, tolerance_num, abs_tolerance_f16);
639 framework::dataset::
make("ReshapeWeights", {
true })),
642 ActivationFunctionsDataset))
658 ActivationFunctionsDataset))
666 template <
typename T>
667 using NEGEMMConvolutionLayerQuantizedFixture = ConvolutionValidationQuantizedFixture<Tensor, Accessor, NEConvolutionLayer, T>;
668 template <
typename T>
671 template <
typename T>
683 framework::dataset::
make("ReshapeWeights", {
true })),
713 framework::dataset::
make("ReshapeWeights", {
true })),
744 framework::dataset::
make("ReshapeWeights", {
true })),
772 template <
typename T>
773 using NEDirectGEMMConv2dLayerFixture = ConvolutionValidationFixture<Tensor, Accessor, NEGEMMConv2d, T>;
784 auto conv = std::make_unique<cpu::CpuGemmDirectConv2d>();
790 conv->configure(&src_info, &weight_info, &bias_info, &dst_info,
conv_info);
794 auto weight = create_tensor<Tensor>(weight_info);
795 auto bias = create_tensor<Tensor>(bias_info);
796 src.allocator()->allocate();
797 weight.allocator()->allocate();
798 bias.allocator()->allocate();
804 auto ws = manage_workspace<Tensor>(conv->workspace(), mg, run_pack, prep_pack);
806 auto run_conv = [&]() ->
Tensor 808 auto dst = create_tensor<Tensor>(dst_info);
809 dst.allocator()->allocate();
816 conv->prepare(prep_pack);
820 auto result_0 = run_conv();
821 auto result_1 = run_conv();
822 for(
size_t i = 0; i < result_0.info()->tensor_shape().total_size(); ++i)
837 auto conv = std::make_unique<NEGEMMConv2d>();
843 auto run_conv = [&]()
846 auto weight = create_tensor<Tensor>(weight_info);
847 auto bias = create_tensor<Tensor>(bias_info);
848 auto dst = create_tensor<Tensor>(dst_info);
850 src.allocator()->allocate();
851 weight.allocator()->allocate();
852 bias.allocator()->allocate();
853 dst.allocator()->allocate();
860 auto result_0 = run_conv();
861 auto result_1 = run_conv();
862 for(
size_t i = 0; i < result_0.info()->tensor_shape().total_size(); ++i)
871 framework::dataset::
make("ReshapeWeights", {
true })),
874 ActivationFunctionsDataset))
883 template <
typename T>
884 using NEDirectGEMMConv2dLayerQuantizedFixture = ConvolutionValidationQuantizedFixture<Tensor, Accessor, NEGEMMConv2d, T>;
886 template <
typename T>
887 using NEDirectGEMMConv2dLayerQuantizedPerChannelFixture = ConvolutionValidationQuantizedPerChannelFixture<Tensor, Accessor, NEGEMMConv2d, T, int8_t>;
937 #endif // __aarch64__
Class representing an absolute tolerance value.
NEGEMMConvolutionLayerQuantizedFixture< int8_t >
constexpr float tolerance_num_f16
F16 Tolerance number.
ConvolutionValidationQuantizedFixture< Tensor, Accessor, NEConvolutionLayer, T, true > NEGEMMConvolutionLayerQuantizedMixedDataLayoutFixture
ConvolutionValidationFixture< Tensor, Accessor, NEConvolutionLayer, T, true > NEGEMMConvolutionLayerMixedDataLayoutFixture
half_float::half half
16-bit floating point type
1 channel, 1 F32 per channel
ARM_COMPUTE_EXPECT(has_error==expected, framework::LogLevel::ERRORS)
ConvolutionValidationQuantizedPerChannelFixture< Tensor, Accessor, NEConvolutionLayer, T, int8_t > NEGEMMConvolutionLayerQuantizedPerChannelFixture
std::enable_if< is_container< T >::value, ContainerDataset< T > >::type make(std::string name, T &&values)
Helper function to create a ContainerDataset.
ConvolutionMethod
Available ConvolutionMethod.
Activation Layer Information class.
WinogradConvolutionLayerFastMathValidationFixture< Tensor, Accessor, NEWinogradConvolutionLayer, T, T, false > NEWinogradConvolutionLayerNoBiasFixture
SimpleTensor< float > src
Copyright (c) 2017-2022 Arm Limited.
1 channel, 1 F16 per channel
NEWinogradConvolutionLayerFixture< float >
Convolution Layer Weights Information class.
16-bit brain floating-point number
Quantization information.
DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(framework::dataset::make("InputInfo", { TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QASYMM8), TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QASYMM8), TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QSYMM16), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QSYMM16), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QSYMM16), }), framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F16), TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QASYMM8), TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QASYMM8), TensorInfo(TensorShape(30U, 11U, 2U), 1, DataType::F32), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QSYMM16, QuantizationInfo(1.f/32768.f, 0)), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QSYMM16, QuantizationInfo(1.f/32768.f, 0)), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QSYMM16, QuantizationInfo(1.f/32768.f, 0)), })), framework::dataset::make("ActivationInfo", { ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::TANH), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::TANH), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LOGISTIC), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::SQRT), })), 
framework::dataset::make("Expected", { false, true, true, true, false, false, true, true, false })), input_info, output_info, act_info, expected)
Accessor implementation for Tensor objects.
DatasetMode
Possible dataset modes.
#define ARM_COMPUTE_UNUSED(...)
To avoid unused variables warnings.
std::unique_ptr< AssetsLibrary > library
TEST_SUITE_END() FIXTURE_DATA_TEST_CASE(RunSmall
[CLActivationLayer Test snippet]
quantized, asymmetric fixed-point 8-bit number unsigned
const unsigned int num_groups
const auto QuantizedActivationFunctionsDataset
Input data sets.
Basic implementation of the tensor interface.
Padding and stride information class.
validate(CLAccessor(output_state), expected_output)
UniqueGemmCommon< Top, Tret > gemm(const GemmArgs &args, const OutputStage &os)
Descriptor used by the 2d Convolution function.
Num samples, channels, height, width.
TensorInfo src_info(src_shape, 1, data_type)
quantized, symmetric per channel fixed-point 8-bit number
Convolution using Winograd.
Lower and Upper Bounded Rectifier ( min(a, max(b, x)) )
FIXTURE_DATA_TEST_CASE(RunSmall, CLAbsLayerFixture< half >, framework::DatasetMode::PRECOMMIT, combine(datasets::SmallShapes(), framework::dataset::make("DataType", DataType::F16)))
ScaleKernelInfo info(interpolation_policy, default_border_mode, PixelValue(), sampling_policy, false)
Upper Bounded Rectifier ( min(a, x) )
static ConvolutionMethod get_convolution_method(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *output, const PadStrideInfo &conv_info, const WeightsInfo &weights_info=WeightsInfo(), const Size2D &dilation=Size2D(1U, 1U), const ActivationLayerInfo &act_info=ActivationLayerInfo(), bool enable_fast_math=false)
Static function to check if given info will return the convolution called by NEConvolutionLayer.
WinogradConvolutionLayerFastMathValidationFixture< Tensor, Accessor, NEWinogradConvolutionLayer, T, T, true, true > NEWinogradConvolutionLayerMixedDataLayoutFixture
Class representing a relative tolerance value.
Class for specifying the size of an image or rectangle.
Num samples, height, width, channels.
Store the tensor's metadata.
JoinDataset< T, U > concat(T &&dataset1, U &&dataset2)
Helper function to create a JoinDataset.
TEST_CASE(FusedActivation, framework::DatasetMode::ALL)
Validate fused activation expecting the following behaviours:
quantized, asymmetric fixed-point 8-bit number signed
Basic function to compute the convolution layer.
zip(zip(framework::dataset::make("Weights", { TensorInfo(TensorShape(32U, 13U, 2U, 2U), 1, DataType::F32), TensorInfo(TensorShape(32U, 13U, 2U, 2U), 1, DataType::F32), TensorInfo(TensorShape(32U, 13U, 2U, 1U), 1, DataType::F32), }), framework::dataset::make("MVBGInfo",{ TensorInfo(TensorShape(2U), 1, DataType::F32), TensorInfo(TensorShape(2U), 1, DataType::F16), TensorInfo(TensorShape(5U), 1, DataType::F32), })), framework::dataset::make("Expected", { true, false, false}))
TEST_SUITE(QASYMM8_to_F32) FIXTURE_DATA_TEST_CASE(RunSmall
DataType
Available data types.
constexpr float abs_tolerance_f32(0.0001f)
F32 Absolute tolerance value for comparing reference's output against implementation's output for flo...
combine(datasets::SmallShapes(), framework::dataset::make("DataType", DataType::F32)))
const auto QuantizationData
void configure_conv_function< NEGEMMConv2d, Tensor >(NEGEMMConv2d &func, Tensor *src, const Tensor *weights, const Tensor *bias, Tensor *dst, const PadStrideInfo &info, const WeightsInfo &weights_info, const Size2D &dilation, const ActivationLayerInfo &act_info, unsigned int num_groups)
CLWinogradConvolutionLayerFastMathFixture16