31 #include "tests/datasets/LargeConvolutionLayerDataset.h" 32 #include "tests/datasets/SmallConvolutionLayerDataset.h" 33 #include "tests/datasets/TinyConvolutionLayerDataset.h" 38 #include "tests/validation/fixtures/ConvolutionLayerFixture.h" 48 class SmallConvolutionLayerDatasetCases final :
public datasets::ConvolutionLayerDataset
51 SmallConvolutionLayerDatasetCases()
54 add_config(TensorShape(1U, 130U, 2000U), TensorShape(1U, 1U, 2000U, 2000U), TensorShape(2000U), TensorShape(1U, 130U, 2000U), PadStrideInfo(1, 1, 0, 0));
// Absolute tolerance of 1 (one quantized step) — name indicates it is used when
// comparing QASYMM8 outputs against the reference. TODO confirm at the use sites.
60 constexpr AbsoluteTolerance<float> tolerance_qasymm8(1);
// NOTE(review): presumably a relative/num tolerance fraction (7%) for floating-point
// comparisons — the use site is not visible in this chunk; verify before relying on it.
61 constexpr
float tolerance_num = 0.07f;
81 ActivationLayerInfo(),
88 ActivationLayerInfo(),
159 framework::dataset::make(
"EnableFastMath", {
false,
false,
false,
false,
false,
false,
true,
true,
true })),
173 &weights_info.clone()->set_is_resizable(
true),
174 &output_info.clone()->set_is_resizable(
true),
conv_info,
187 template <
typename T>
188 using CLGEMMConvolutionLayerFixture = ConvolutionValidationFixture<CLTensor, CLAccessor, CLGEMMConvolutionLayer, T>;
194 framework::dataset::
make("ReshapeWeights", {
true })),
198 ActivationFunctionsSmallDataset))
208 framework::dataset::
make("ReshapeWeights", {
true })),
212 ActivationFunctionsSmallDataset))
220 template <
typename T>
221 using CLGEMMConvolutionLayerQuantizedFixture = ConvolutionValidationQuantizedFixture<CLTensor, CLAccessor, CLGEMMConvolutionLayer, T>;
222 template <
typename T>
241 QuantizationInfo(0.5f, 10),
242 QuantizationInfo(0.3f, 3),
243 QuantizationInfo(1.1f, 10),
249 framework::dataset::
make("ReshapeWeights", {
true })),
275 framework::dataset::
make("ReshapeWeights", {
true })),
289 framework::dataset::
make("ReshapeWeights", {
true })),
317 template <
typename T>
318 using CLGEMMGroupedConvolutionLayerFixture = ConvolutionValidationFixture<CLTensor, CLAccessor, CLGEMMConvolutionLayer, T>;
326 framework::dataset::
make("ReshapeWeights", {
true })),
329 ActivationFunctionsSmallDataset))
340 ActivationFunctionsDataset))
350 framework::dataset::
make("ReshapeWeights", {
true })),
353 ActivationFunctionsSmallDataset))
364 ActivationFunctionsDataset))
RelativeTolerance< float > tolerance_f32(0.001f)
F32 Tolerance value for comparing reference's output against implementation's output for floating point data types.
half_float::half half
16-bit floating point type
1 channel, 1 F32 per channel
ARM_COMPUTE_EXPECT(has_error==expected, framework::LogLevel::ERRORS)
std::enable_if< is_container< T >::value, ContainerDataset< T > >::type make(std::string name, T &&values)
Helper function to create a ContainerDataset.
const auto QuantizedActivationFunctionsSmallDataset
ConvolutionMethod
Available ConvolutionMethod.
Activation Layer Information class.
Copyright (c) 2017-2021 Arm Limited.
1 channel, 1 F16 per channel
Convolution Layer Weights Information class.
Quantization information.
DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(framework::dataset::make("InputInfo", { TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QASYMM8), TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QASYMM8), TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QSYMM16), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QSYMM16), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QSYMM16), }), framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F16), TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QASYMM8), TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QASYMM8), TensorInfo(TensorShape(30U, 11U, 2U), 1, DataType::F32), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QSYMM16, QuantizationInfo(1.f/32768.f, 0)), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QSYMM16, QuantizationInfo(1.f/32768.f, 0)), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QSYMM16, QuantizationInfo(1.f/32768.f, 0)), })), framework::dataset::make("ActivationInfo", { ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::TANH), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::TANH), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LOGISTIC), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::SQRT), })), 
framework::dataset::make("Expected", { false, true, true, true, false, false, true, true, false })), input_info, output_info, act_info, expected)
DatasetMode
Possible dataset modes.
TEST_SUITE_END() FIXTURE_DATA_TEST_CASE(RunSmall
[CLActivationLayer Test snippet]
quantized, asymmetric fixed-point 8-bit number unsigned
Accessor implementation for CLTensor objects.
const auto QuantizedActivationFunctionsDataset
Input data sets.
TEST_SUITE(U8_to_S8) FIXTURE_DATA_TEST_CASE(RunSmall
Padding and stride information class.
validate(CLAccessor(output_state), expected_output)
Num samples, channels, height, width.
quantized, symmetric per channel fixed-point 8-bit number
Convolution using Winograd.
Lower and Upper Bounded Rectifier ( )
FIXTURE_DATA_TEST_CASE(RunSmall, CLAbsLayerFixture< half >, framework::DatasetMode::PRECOMMIT, combine(datasets::SmallShapes(), framework::dataset::make("DataType", DataType::F16)))
Upper Bounded Rectifier ( )
Class for specifying the size of an image or rectangle.
Num samples, height, width, channels.
static ConvolutionMethod get_convolution_method(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *output, const PadStrideInfo &conv_info, const WeightsInfo &weights_info, const ActivationLayerInfo &act_info, const GPUTarget gpu_target, const Size2D &dilation=Size2D(1U, 1U), bool enable_fast_math=false)
Static function to check if given info will return the convolution called by CLConvolutionLayer.
ConvolutionValidationQuantizedPerChannelFixture< CLTensor, CLAccessor, CLGEMMConvolutionLayer, T, int8_t > CLGEMMConvolutionLayerQuantizedPerChannelFixture
Store the tensor's metadata.
CLGEMMGroupedConvolutionLayerFixture< half >
quantized, asymmetric fixed-point 8-bit number signed
RelativeTolerance< half_float::half > tolerance_f16(half(0.2))
F16 Tolerance value for comparing reference's output against implementation's output for floating point data types.
zip(zip(framework::dataset::make("Weights", { TensorInfo(TensorShape(32U, 13U, 2U, 2U), 1, DataType::F32), TensorInfo(TensorShape(32U, 13U, 2U, 2U), 1, DataType::F32), TensorInfo(TensorShape(32U, 13U, 2U, 1U), 1, DataType::F32), }), framework::dataset::make("MVBGInfo",{ TensorInfo(TensorShape(2U), 1, DataType::F32), TensorInfo(TensorShape(2U), 1, DataType::F16), TensorInfo(TensorShape(5U), 1, DataType::F32), })), framework::dataset::make("Expected", { true, false, false}))
combine(datasets::SmallShapes(), framework::dataset::make("DataType", DataType::F32)))
const auto QuantizationData