30 #include "tests/datasets/ConvertPolicyDataset.h" 31 #include "tests/datasets/ShapeDatasets.h" 36 #include "tests/validation/fixtures/ArithmeticOperationsFixture.h" 48 { ActivationLayerInfo() });
109 template <
typename T>
145 template <
typename T>
146 using CLArithmeticAdditionQuantizedFixture = ArithmeticAdditionValidationQuantizedFixture<CLTensor, CLAccessor, CLArithmeticAddition, T>;
151 framework::dataset::
make("
DataType", DataType::QASYMM8)),
161 template <
typename T>
190 framework::dataset::
make("
DataType", DataType::QASYMM8_SIGNED)),
203 framework::dataset::
make("
DataType", DataType::QSYMM16)),
216 template <
typename T>
217 using CLArithmeticAdditionFloatFixture = ArithmeticAdditionValidationFloatFixture<CLTensor, CLAccessor, CLArithmeticAddition, T>;
224 EmptyActivationFunctionsDataset),
234 ActivationFunctionsDataset),
247 EmptyActivationFunctionsDataset),
257 ActivationFunctionsDataset),
267 EmptyActivationFunctionsDataset),
274 template <
typename T>
280 EmptyActivationFunctionsDataset),
289 ActivationFunctionsDataset),
299 EmptyActivationFunctionsDataset),
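The excerpt above keeps only the fixture typedefs and the trailing dataset arguments of the FIXTURE_DATA_TEST_CASE calls. As an illustrative sketch, not the file's exact wording, a small FP32 case typically combines a shape dataset, a data type, a convert policy and one of the activation datasets, then validates the fixture's output against its reference; the RunSmall name, the PRECOMMIT mode, the WRAP policy, the exact combine() nesting and the _target/_reference members are assumptions here.

// Illustrative sketch only: the dataset combination is assumed, not taken from the file.
FIXTURE_DATA_TEST_CASE(RunSmall, CLArithmeticAdditionFloatFixture<float>, framework::DatasetMode::PRECOMMIT,
                       combine(combine(combine(datasets::SmallShapes(),
                                               framework::dataset::make("DataType", DataType::F32)),
                                       framework::dataset::make("ConvertPolicy", ConvertPolicy::WRAP)),
                               EmptyActivationFunctionsDataset))
{
    // Compare the CL result produced by the fixture against its reference implementation.
    validate(CLAccessor(_target), _reference);
}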
DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
    framework::dataset::make("InputInfo", {
        TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32),
        TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32),
        TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32),
        TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QASYMM8),
        TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QASYMM8),
        TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32),
        TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QSYMM16),
        TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QSYMM16),
        TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QSYMM16),
    }),
    framework::dataset::make("OutputInfo", {
        TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F16),
        TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32),
        TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32),
        TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QASYMM8),
        TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QASYMM8),
        TensorInfo(TensorShape(30U, 11U, 2U), 1, DataType::F32),
        TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QSYMM16, QuantizationInfo(1.f/32768.f, 0)),
        TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QSYMM16, QuantizationInfo(1.f/32768.f, 0)),
        TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QSYMM16, QuantizationInfo(1.f/32768.f, 0)),
    })),
    framework::dataset::make("ActivationInfo", {
        ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
        ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
        ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
        ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU),
        ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::TANH),
        ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
        ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::TANH),
        ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LOGISTIC),
        ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::SQRT),
    })),
    framework::dataset::make("Expected", { false, true, true, true, false, false, true, true, false })),
    input_info, output_info, act_info, expected)
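The body of this DATA_TEST_CASE is not part of the extract. It exercises the static overload CLArithmeticAddition::validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, ConvertPolicy policy, const ActivationLayerInfo &act_info = ActivationLayerInfo()) and compares the returned Status against the Expected flag for each zipped tuple. A minimal sketch of that pattern follows; the clone()/set_is_resizable(false) handling and the WRAP policy are assumptions, not statements copied from the file.

{
    // Sketch of the expected-status check; exact argument handling is assumed.
    const Status status = CLArithmeticAddition::validate(&input_info.clone()->set_is_resizable(false),
                                                         &input_info.clone()->set_is_resizable(false),
                                                         &output_info.clone()->set_is_resizable(false),
                                                         ConvertPolicy::WRAP,
                                                         act_info);
    ARM_COMPUTE_EXPECT(bool(status) == expected, framework::LogLevel::ERRORS);
}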