33 #include "tests/datasets/LargeConvolutionLayerDataset.h" 34 #include "tests/datasets/SmallConvolutionLayerDataset.h" 35 #include "tests/datasets/TinyConvolutionLayerDataset.h" 40 #include "tests/validation/fixtures/ConvolutionLayerFixture.h" 41 #include "tests/validation/fixtures/WinogradConvolutionLayerFixture.h" 60 func.configure(src, weights, bias, dst, conv_info);
70 #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC 75 #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC 78 constexpr
float tolerance_num = 0.07f;
85 #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC 142 &weights_info.clone()->set_is_resizable(
true),
151 template <
typename T>
152 using NEWinogradConvolutionLayerFixture = WinogradConvolutionLayerFastMathValidationFixture<Tensor, Accessor, NEWinogradConvolutionLayer, T>;
154 template <
typename T>
163 ActivationFunctionsDataset),
172 ActivationFunctionsDataset),
185 ActivationFunctionsDataset),
194 ActivationFunctionsDataset),
207 ActivationFunctionsDataset),
216 ActivationFunctionsDataset),
229 ActivationFunctionsDataset),
238 ActivationFunctionsDataset),
251 ActivationFunctionsDataset),
261 ActivationFunctionsDataset),
273 ActivationFunctionsDataset),
283 ActivationFunctionsDataset),
295 ActivationFunctionsDataset),
305 ActivationFunctionsDataset),
319 ActivationFunctionsDataset),
329 ActivationFunctionsDataset),
341 datasets::SmallWinogradConvolutionLayer5x5Dataset()),
343 ActivationFunctionsDataset),
353 #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC 361 ActivationFunctionsDataset),
372 ActivationFunctionsDataset),
385 template <
typename T>
386 using NEGEMMConvolutionLayerFixture = ConvolutionValidationFixture<Tensor, Accessor, NEConvolutionLayer, T>;
389 #if defined(__ARM_FEATURE_BF16_VECTOR_ARITHMETIC) || defined(ARM_COMPUTE_FORCE_BF16) 395 ActivationFunctionsDataset))
403 #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC 409 ActivationFunctionsDataset))
412 validate(
Accessor(_target), _reference, rel_tolerance_f16, tolerance_num, abs_tolerance_f16);
419 framework::dataset::
make("ReshapeWeights", {
true })),
422 ActivationFunctionsDataset))
430 template <
typename T>
431 using NEGEMMConvolutionLayerQuantizedFixture = ConvolutionValidationQuantizedFixture<Tensor, Accessor, NEConvolutionLayer, T>;
433 template <
typename T>
445 framework::dataset::
make("ReshapeWeights", {
true })),
458 framework::dataset::
make("ReshapeWeights", {
true })),
472 framework::dataset::
make("ReshapeWeights", {
true })),
500 template <
typename T>
501 using NEDirectGEMMConv2dLayerFixture = ConvolutionValidationFixture<Tensor, Accessor, NEGEMMConv2d, T>;
506 framework::dataset::
make("ReshapeWeights", {
true })),
509 ActivationFunctionsDataset))
518 template <
typename T>
519 using NEDirectGEMMConv2dLayerQuantizedFixture = ConvolutionValidationQuantizedFixture<Tensor, Accessor, NEGEMMConv2d, T>;
521 template <
typename T>
522 using NEDirectGEMMConv2dLayerQuantizedPerChannelFixture = ConvolutionValidationQuantizedPerChannelFixture<Tensor, Accessor, NEGEMMConv2d, T, int8_t>;
572 #endif // __aarch64__
Class representing an absolute tolerance value.
constexpr float tolerance_num_f16
F16 Tolerance number.
half_float::half half
16-bit floating point type
1 channel, 1 F32 per channel
ARM_COMPUTE_EXPECT(has_error==expected, framework::LogLevel::ERRORS)
ConvolutionValidationQuantizedPerChannelFixture< Tensor, Accessor, NEConvolutionLayer, T, int8_t > NEGEMMConvolutionLayerQuantizedPerChannelFixture
std::enable_if< is_container< T >::value, ContainerDataset< T > >::type make(std::string name, T &&values)
Helper function to create a ContainerDataset.
ConvolutionMethod
Available ConvolutionMethod.
Activation Layer Information class.
WinogradConvolutionLayerFastMathValidationFixture< Tensor, Accessor, NEWinogradConvolutionLayer, T, T, false > NEWinogradConvolutionLayerNoBiasFixture
SimpleTensor< float > src
Copyright (c) 2017-2021 Arm Limited.
1 channel, 1 F16 per channel
NEWinogradConvolutionLayerFixture< float >
Convolution Layer Weights Information class.
16-bit brain floating-point number
Quantization information.
DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(framework::dataset::make("InputInfo", { TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QASYMM8), TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QASYMM8), TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QSYMM16), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QSYMM16), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QSYMM16), }), framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F16), TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QASYMM8), TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QASYMM8), TensorInfo(TensorShape(30U, 11U, 2U), 1, DataType::F32), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QSYMM16, QuantizationInfo(1.f/32768.f, 0)), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QSYMM16, QuantizationInfo(1.f/32768.f, 0)), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QSYMM16, QuantizationInfo(1.f/32768.f, 0)), })), framework::dataset::make("ActivationInfo", { ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::TANH), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::TANH), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LOGISTIC), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::SQRT), })), 
framework::dataset::make("Expected", { false, true, true, true, false, false, true, true, false })), input_info, output_info, act_info, expected)
Accessor implementation for Tensor objects.
DatasetMode
Possible dataset modes.
#define ARM_COMPUTE_UNUSED(...)
To avoid unused variables warnings.
TEST_SUITE_END() FIXTURE_DATA_TEST_CASE(RunSmall
[CLActivationLayer Test snippet]
quantized, asymmetric fixed-point 8-bit number unsigned
const unsigned int num_groups
const auto QuantizedActivationFunctionsDataset
Input data sets.
Basic implementation of the tensor interface.
TEST_SUITE(U8_to_S8) FIXTURE_DATA_TEST_CASE(RunSmall
Padding and stride information class.
validate(CLAccessor(output_state), expected_output)
Descriptor used by the Convolution function.
Num samples, channels, height, width.
quantized, symmetric per channel fixed-point 8-bit number
Convolution using Winograd.
Lower and Upper Bounded Rectifier ( )
FIXTURE_DATA_TEST_CASE(RunSmall, CLAbsLayerFixture< half >, framework::DatasetMode::PRECOMMIT, combine(datasets::SmallShapes(), framework::dataset::make("DataType", DataType::F16)))
ScaleKernelInfo info(interpolation_policy, default_border_mode, PixelValue(), sampling_policy, false)
Upper Bounded Rectifier ( )
static ConvolutionMethod get_convolution_method(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *output, const PadStrideInfo &conv_info, const WeightsInfo &weights_info=WeightsInfo(), const Size2D &dilation=Size2D(1U, 1U), const ActivationLayerInfo &act_info=ActivationLayerInfo(), bool enable_fast_math=false)
Static function to check if given info will return the convolution called by NEConvolutionLayer.
Class representing a relative tolerance value.
Class for specifying the size of an image or rectangle.
Num samples, height, width, channels.
Store the tensor's metadata.
JoinDataset< T, U > concat(T &&dataset1, U &&dataset2)
Helper function to create a JoinDataset.
quantized, asymmetric fixed-point 8-bit number signed
Basic function to compute the convolution layer.
zip(zip(framework::dataset::make("Weights", { TensorInfo(TensorShape(32U, 13U, 2U, 2U), 1, DataType::F32), TensorInfo(TensorShape(32U, 13U, 2U, 2U), 1, DataType::F32), TensorInfo(TensorShape(32U, 13U, 2U, 1U), 1, DataType::F32), }), framework::dataset::make("MVBGInfo",{ TensorInfo(TensorShape(2U), 1, DataType::F32), TensorInfo(TensorShape(2U), 1, DataType::F16), TensorInfo(TensorShape(5U), 1, DataType::F32), })), framework::dataset::make("Expected", { true, false, false}))
DataType
Available data types.
constexpr float abs_tolerance_f32(0.0001f)
F32 Absolute tolerance value for comparing reference's output against implementation's output for flo...
combine(datasets::SmallShapes(), framework::dataset::make("DataType", DataType::F32)))
const auto QuantizationData
void configure_conv_function< NEGEMMConv2d, Tensor >(NEGEMMConv2d &func, Tensor *src, const Tensor *weights, const Tensor *bias, Tensor *dst, const PadStrideInfo &info, const WeightsInfo &weights_info, const Size2D &dilation, const ActivationLayerInfo &act_info, unsigned int num_groups)
CLWinogradConvolutionLayerFastMathFixture16