28 #include "tests/datasets/ShapeDatasets.h" 34 #include "tests/validation/fixtures/QLSTMLayerNormalizationFixture.h" 44 constexpr AbsoluteTolerance<int16_t> tolerance_s16(0);
45 constexpr uint32_t vector_size_byte = 16;
47 using test::datasets::ShapeDataset;
48 using CLQLSTMLayerNormalization = CLSynthetizeFunction<CLQLSTMLayerNormalizationKernel>;
49 template <u
int32_t num_elements_per_iter, u
int32_t num_batches, u
int32_t num_iteration>
50 class QLSTMLayerNormShapeDataSet :
public ShapeDataset
52 static constexpr
auto boundary_minus_one = num_elements_per_iter * num_iteration - 1;
53 static constexpr
auto boundary = num_elements_per_iter * num_iteration;
54 static constexpr
auto boundary_plus_one = num_elements_per_iter * num_iteration + 1;
57 QLSTMLayerNormShapeDataSet(std::string
name)
60 TensorShape{ boundary_minus_one, num_batches },
61 TensorShape{ boundary, num_batches },
62 TensorShape{ boundary_plus_one, num_batches }
68 template <u
int32_t num_elements_per_iter, u
int32_t num_batches>
69 class QLSTMLayerNormShapeDataSet<num_elements_per_iter, num_batches, 0> :
public ShapeDataset
72 QLSTMLayerNormShapeDataSet(std::string
name)
75 TensorShape{ 1, num_batches },
76 TensorShape{ 2, num_batches }
85 static const TensorShape correct_input_shape{ TensorShape(15U, 2U) };
86 static const TensorShape correct_weight_shape{ TensorShape(15U) };
87 static const TensorShape correct_bias_shape{ TensorShape(15U) };
91 static const uint32_t tensor_num_channel{ 1 };
100 TensorInfo(correct_input_shape, tensor_num_channel, correct_input_dt),
101 TensorInfo(correct_input_shape, tensor_num_channel, correct_input_dt),
103 TensorInfo(correct_input_shape, tensor_num_channel, correct_input_dt),
104 TensorInfo(correct_input_shape, tensor_num_channel, correct_input_dt),
105 TensorInfo(correct_input_shape, tensor_num_channel, correct_input_dt),
106 TensorInfo(correct_input_shape, tensor_num_channel, correct_input_dt),
109 TensorInfo(correct_weight_shape, tensor_num_channel, correct_weight_dt),
111 TensorInfo(correct_weight_shape, tensor_num_channel, correct_weight_dt),
112 TensorInfo(correct_weight_shape, tensor_num_channel, correct_weight_dt),
114 TensorInfo(correct_weight_shape, tensor_num_channel, correct_weight_dt),
116 TensorInfo(correct_weight_shape, tensor_num_channel, correct_weight_dt),
120 TensorInfo(correct_bias_shape, tensor_num_channel, correct_bias_dt),
121 TensorInfo(correct_bias_shape, tensor_num_channel, correct_bias_dt),
123 TensorInfo(correct_bias_shape, tensor_num_channel, correct_bias_dt),
124 TensorInfo(correct_bias_shape, tensor_num_channel, correct_bias_dt),
126 TensorInfo(correct_bias_shape, tensor_num_channel, correct_bias_dt),
139 template <
typename T>
163 #define QSYMM16_DATASET_ITER(num_input_batch, num_iter) \ 164 combine(combine(zip(zip(QLSTMLayerNormShapeDataSet<qsymm16_per_vector, num_input_batch, num_iter>("InputShape"), \ 165 QLSTMLayerNormShapeDataSet<qsymm16_per_vector, 1, num_iter>("WeightShape")), \ 166 QLSTMLayerNormShapeDataSet<qsymm16_per_vector, 1, num_iter>("BiasShape")), \ 167 framework::dataset::make("DataType", DataType::QSYMM16)), \ 168 framework::dataset::make("InputQuantizationInfo", { QuantizationInfo(1. / 8192), QuantizationInfo(8192) })) 170 #define QSYMM16_DATASET_1D \ 171 concat(concat(QSYMM16_DATASET_ITER(1, 0), QSYMM16_DATASET_ITER(1, 1)), QSYMM16_DATASET_ITER(1, 2)) 173 #define QSYMM16_DATASET_2D \ 174 concat(concat(QSYMM16_DATASET_ITER(3, 0), QSYMM16_DATASET_ITER(3, 1)), QSYMM16_DATASET_ITER(3, 2)) 188 #undef QSYMM16_DATASET_ITER 189 #undef QSYMM16_DATASET_2D 190 #undef QSYMM16_DATASET_1D constexpr uint32_t qsymm16_per_vector
Tests will be targeting.
#define QSYMM16_DATASET_2D
quantized, symmetric fixed-point 16-bit number
ARM_COMPUTE_EXPECT(has_error==expected, framework::LogLevel::ERRORS)
std::enable_if< is_container< T >::value, ContainerDataset< T > >::type make(std::string name, T &&values)
Helper function to create a ContainerDataset.
Copyright (c) 2017-2021 Arm Limited.
1 channel, 1 F16 per channel
1 channel, 1 S32 per channel
DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(framework::dataset::make("InputInfo", { TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QASYMM8), TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QASYMM8), TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QSYMM16), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QSYMM16), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QSYMM16), }), framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F16), TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QASYMM8), TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QASYMM8), TensorInfo(TensorShape(30U, 11U, 2U), 1, DataType::F32), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QSYMM16, QuantizationInfo(1.f/32768.f, 0)), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QSYMM16, QuantizationInfo(1.f/32768.f, 0)), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QSYMM16, QuantizationInfo(1.f/32768.f, 0)), })), framework::dataset::make("ActivationInfo", { ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::TANH), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::TANH), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LOGISTIC), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::SQRT), })), 
framework::dataset::make("Expected", { false, true, true, true, false, false, true, true, false })), input_info, output_info, act_info, expected)
TEST_SUITE_END() FIXTURE_DATA_TEST_CASE(RunSmall
[CLActivationLayer Test snippet]
#define QSYMM16_DATASET_1D
Accessor implementation for CLTensor objects.
TEST_SUITE(U8_to_S8) FIXTURE_DATA_TEST_CASE(RunSmall
validate(CLAccessor(output_state), expected_output)
FIXTURE_DATA_TEST_CASE(RunSmall, CLAbsLayerFixture< half >, framework::DatasetMode::PRECOMMIT, combine(datasets::SmallShapes(), framework::dataset::make("DataType", DataType::F16)))
Store the tensor's metadata.
QLSTMLayerNormalizationValidationFixture< CLTensor, CLAccessor, CLQLSTMLayerNormalization, T > CLQLSTMLayerNormalizationFixture
zip(zip(framework::dataset::make("Weights", { TensorInfo(TensorShape(32U, 13U, 2U, 2U), 1, DataType::F32), TensorInfo(TensorShape(32U, 13U, 2U, 2U), 1, DataType::F32), TensorInfo(TensorShape(32U, 13U, 2U, 1U), 1, DataType::F32), }), framework::dataset::make("MVBGInfo",{ TensorInfo(TensorShape(2U), 1, DataType::F32), TensorInfo(TensorShape(2U), 1, DataType::F16), TensorInfo(TensorShape(5U), 1, DataType::F32), })), framework::dataset::make("Expected", { true, false, false}))
DataType
Available data types.
Status validate(const ITensorInfo *scores_in, const ITensorInfo *boxes_in, const ITensorInfo *batch_splits_in, const ITensorInfo *scores_out, const ITensorInfo *boxes_out, const ITensorInfo *classes, const ITensorInfo *batch_splits_out, const ITensorInfo *keeps, const ITensorInfo *keeps_size, const BoxNMSLimitInfo info)