#include "tests/datasets/GEMMLowpFusedOffsetOutputDataset.h"
#include "tests/datasets/LargeGEMMLowpDataset.h"
#include "tests/datasets/ShapeDatasets.h"
#include "tests/datasets/SmallGEMMLowpDataset.h"
#include "tests/validation/fixtures/GEMMLowpAssemblyFixture.h"
#include "tests/validation/fixtures/GEMMLowpFixture.h"

// MatrixMultiplyCore configuration test, parameterised over the GEMMLowp shapes
// and quantization offsets (the leading macro arguments are elided here):
// DATA_TEST_CASE(..., shape_a, shape_b, shape_c, a_offset, b_offset)
// Inside the test body: create and configure the function. The destination c is
// the S32 accumulator tensor; the bias argument is nullptr.
NEGEMMLowpMatrixMultiplyCore gemmlowp_mm;
gemmlowp_mm.configure(&a, &b, nullptr, &c);
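For orientation, here is a minimal, self-contained sketch of driving NEGEMMLowpMatrixMultiplyCore outside the test harness. The shapes and quantization parameters are illustrative assumptions, not values taken from the test file.

#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.h"
#include "arm_compute/runtime/Tensor.h"

using namespace arm_compute;

void gemmlowp_sketch()
{
    // TensorShape is width-first: a is K x M, b is N x K, c is N x M
    // (M = 16, N = 32, K = 24 assumed here).
    Tensor a, b, c;
    a.allocator()->init(TensorInfo(TensorShape(24U, 16U), 1, DataType::QASYMM8));
    b.allocator()->init(TensorInfo(TensorShape(32U, 24U), 1, DataType::QASYMM8));
    c.allocator()->init(TensorInfo(TensorShape(32U, 16U), 1, DataType::S32));

    // Assumed quantization parameters (scale, zero point) for the inputs.
    a.info()->set_quantization_info(QuantizationInfo(1.f / 255, 10));
    b.info()->set_quantization_info(QuantizationInfo(1.f / 255, 5));

    NEGEMMLowpMatrixMultiplyCore gemmlowp_mm;
    gemmlowp_mm.configure(&a, &b, nullptr, &c); // bias is nullptr, as in the test above

    a.allocator()->allocate();
    b.allocator()->allocate();
    c.allocator()->allocate();
    // ... fill a and b with quantized data, then:
    gemmlowp_mm.run();
}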
// Validate test: each TensorInfo is cloned and marked non-resizable before being
// handed to the static validate() overload. Only two argument lines remain here:
//     ...,
//     &b_info.clone()->set_is_resizable(false),
//     ...,
//     &output_info.clone()->set_is_resizable(false));
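The same static validate() can be exercised standalone. A small sketch, assuming a deliberately wrong output type so that validation fails (shapes illustrative):

TensorInfo a_info(TensorShape(24U, 16U), 1, DataType::QASYMM8);
TensorInfo b_info(TensorShape(32U, 24U), 1, DataType::QASYMM8);
TensorInfo output_info(TensorShape(32U, 16U), 1, DataType::F32); // wrong on purpose: GEMMLowp accumulates to S32

const Status status = NEGEMMLowpMatrixMultiplyCore::validate(&a_info.clone()->set_is_resizable(false),
                                                             &b_info.clone()->set_is_resizable(false),
                                                             nullptr,
                                                             &output_info.clone()->set_is_resizable(false));
ARM_COMPUTE_EXPECT(bool(status) == false, framework::LogLevel::ERRORS);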
// QuantizeDownInt32Scale Validate test: the same cloned, non-resizable pattern
// (surrounding arguments elided here):
//     ...,
//     &b_info.clone()->set_is_resizable(false),
//     &output_info.clone()->set_is_resizable(false),
//     ...

// Bounded case: clamp the requantized result to [0, 205] before converting to QASYMM8.
output_stage.gemmlowp_min_bound = 0;
output_stage.gemmlowp_max_bound = 205;

NEGEMMLowpOutputStage f;
f.configure(&input1, &input2, &output, output_stage);
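Behind those two bounds sits the QUANTIZE_DOWN output stage, which rescales the int32 accumulator with a plain integer multiply and shift, then clamps. A scalar model of that arithmetic, assuming the usual gemmlowp formulation (parameter names are illustrative):

#include <algorithm>
#include <cstdint>

// Scalar model of GEMMLowpOutputStageType::QUANTIZE_DOWN:
// out = clamp(((acc + result_offset) * result_mult_int) >> result_shift).
uint8_t quantize_down_scale(int32_t acc, int32_t result_offset, int32_t result_mult_int,
                            int32_t result_shift, int32_t min_bound, int32_t max_bound)
{
    int32_t out = (acc + result_offset) * result_mult_int;
    out >>= result_shift;
    out = std::min(max_bound, std::max(min_bound, out)); // e.g. [0, 205] as above
    return static_cast<uint8_t>(out);
}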
// A later output-stage suite repeats the same Validate pattern (context elided):
//     ...,
//     &b_info.clone()->set_is_resizable(false),
//     &output_info.clone()->set_is_resizable(false),
//     ...
TEST_SUITE(QuantizeDownInt32ToUint8ScaleByFixedPoint)

using NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointFixture =
    GEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointValidationFixture<Tensor, Accessor, NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPoint>;
using NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointFixture =
    GEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointValidationFixture<Tensor, Accessor, NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPoint>;
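The ScaleByFixedPoint fixtures aliased above test the higher-precision variant: instead of a plain integer multiply, the accumulator is scaled by a Q31 fixed-point multiplier via a saturating doubling high multiply followed by a rounding right shift. A scalar sketch of those two primitives, following gemmlowp's reference arithmetic:

#include <cstdint>
#include <limits>

// Saturating rounded Q31 product: round((a * b) / 2^31), saturated.
int32_t saturating_rounding_doubling_high_mul(int32_t a, int32_t b)
{
    if(a == std::numeric_limits<int32_t>::min() && b == std::numeric_limits<int32_t>::min())
    {
        return std::numeric_limits<int32_t>::max(); // the only overflowing case
    }
    const int64_t ab    = static_cast<int64_t>(a) * static_cast<int64_t>(b);
    const int32_t nudge = ab >= 0 ? (1 << 30) : (1 - (1 << 30));
    return static_cast<int32_t>((ab + nudge) / (1ll << 31));
}

// Divide by 2^exponent, rounding half away from zero.
int32_t rounding_divide_by_pot(int32_t x, int exponent)
{
    const int32_t mask      = static_cast<int32_t>((1ll << exponent) - 1);
    const int32_t remainder = x & mask;
    const int32_t threshold = (mask >> 1) + (x < 0 ? 1 : 0);
    return (x >> exponent) + (remainder > threshold ? 1 : 0);
}

// Requantize one accumulator; the offset and min/max clamping applied by the
// uint8 path are omitted here for brevity.
int32_t scale_by_fixedpoint(int32_t acc, int32_t quantized_multiplier, int right_shift)
{
    return rounding_divide_by_pot(saturating_rounding_doubling_high_mul(acc, quantized_multiplier), right_shift);
}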
// Validate test for this suite, again via cloned non-resizable TensorInfos:
//     ...,
//     &b_info.clone()->set_is_resizable(false),
//     &output_info.clone()->set_is_resizable(false),
//     ...

// Four fixture runs close the suite; only the dataset tails remain here:
//     ... quantize_down_int32_to_uint8_scale_by_fixedpoint_cases))
//     ... quantize_down_int32_to_uint8_scale_by_fixedpoint_cases))
//     ... quantize_down_int32_to_uint8_scale_by_fixedpoint_relu_cases))
//     ... quantize_down_int32_to_uint8_scale_by_fixedpoint_relu_cases))
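The quantize_down_*_cases constants referenced by those fixture runs are built from the dataset helpers (make, combine) listed among the cross-references below. A hypothetical reconstruction of the shape of such a constant; the parameter names follow the fixed-point stage, but the values are placeholders, not those of the real file:

// Hypothetical: combine() forms the cartesian product of the named value sets.
const auto quantize_down_fixedpoint_cases_example =
    combine(combine(combine(framework::dataset::make("result_fixedpoint_multiplier", { 254601600, 254601602 }),
                            framework::dataset::make("result_shift", { 1, 2 })),
                    framework::dataset::make("result_offset_after_shift", { 2, 3 })),
            framework::dataset::make("addBias", { false, true }));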
TEST_SUITE(QuantizeDownInt32ToInt8ScaleByFixedPoint)

using NEGEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPointFixture =
    GEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPointValidationFixture<Tensor, Accessor, NEGEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPoint>;
// Validate test (same cloned-info pattern):
//     ...,
//     &b_info.clone()->set_is_resizable(false),
//     &output_info.clone()->set_is_resizable(false),
//     ...

// Fixture runs; only the dataset tails remain here:
//     ... quantize_down_int32_to_int8_scale_by_fixedpoint_cases))
//     ... quantize_down_int32_to_int8_scale_by_fixedpoint_relu_cases))
TEST_SUITE(QuantizeDownInt32ToInt16ScaleByFixedPoint)

using NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointFixture =
    GEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointValidationFixture<Tensor, Accessor, NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPoint>;
// Validate test (same cloned-info pattern):
//     ...,
//     &b_info.clone()->set_is_resizable(false),
//     &output_info.clone()->set_is_resizable(false),
//     ...

// Fixture runs, including the "multgreat1" cases whose effective multiplier is
// greater than 1; only the dataset tails remain here:
//     ... quantize_down_int32_to_int16_scale_by_fixedpoint_cases))
//     ... quantize_down_int32_to_int16_scale_by_fixedpoint_multgreat1_cases))
//     ... quantize_down_int32_to_int16_scale_by_fixedpoint_relu_cases))
//     ... quantize_down_int32_to_int16_scale_by_fixedpoint_multgreat1_relu_cases))
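The multgreat1 cases exist because the QSYMM16 path accepts an effective multiplier greater than 1. Such multipliers arise when a real rescaling factor is decomposed into a Q31 multiplier plus a shift; a sketch of the usual frexp-based decomposition (an assumption about the approach, not code from this file):

#include <cmath>
#include <cstdint>

// Decompose scale into quantized_multiplier * 2^(-shift), with
// quantized_multiplier in [2^30, 2^31). A negative shift means a left shift,
// which is how multipliers greater than 1 are represented.
void quantize_multiplier(double scale, int32_t &quantized_multiplier, int32_t &shift)
{
    int    exponent = 0;
    double q        = std::frexp(scale, &exponent); // scale = q * 2^exponent, q in [0.5, 1)
    auto   q_fixed  = static_cast<int64_t>(std::llround(q * (1ll << 31)));
    if(q_fixed == (1ll << 31)) // rounding carried q up to 1.0: renormalize
    {
        q_fixed /= 2;
        ++exponent;
    }
    quantized_multiplier = static_cast<int32_t>(q_fixed);
    shift                = -exponent; // positive = right shift, negative = left shift
}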
Referenced symbols:

NEGEMMLowpQuantizeDownInt32ScaleFixture: alias for GEMMLowpQuantizeDownInt32ToUint8ScaleValidationFixture<Tensor, Accessor, NEGEMMLowpOutputStage>.
DataType::QSYMM16: quantized, symmetric fixed-point 16-bit number.
quantize_down_int32_to_int8_scale_by_fixedpoint_relu_cases: dataset constant (const auto).
quantize_down_int32_to_int16_scale_by_fixedpoint_multgreat1_relu_cases: dataset constant (const auto).
DataType::F32: 1 channel, 1 F32 per channel.
ARM_COMPUTE_EXPECT(has_error == expected, framework::LogLevel::ERRORS): expectation macro used throughout the Validate tests.
framework::dataset::make(std::string name, T &&values) -> ContainerDataset<T> (enabled when is_container<T>::value): helper function to create a ContainerDataset.
GEMMLowpOutputStageInfo::gemmlowp_max_bound (int32_t): GEMMLowp max value used to saturate down the output result before converting back to QASYMM8.
GEMMLowpOutputStageInfo::type (GEMMLowpOutputStageType): GEMMLowp output stage type.
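Taken together with the min bound and output data type listed further down, and assuming the remaining GEMMLowpOutputStageInfo fields (gemmlowp_offset, gemmlowp_multiplier, gemmlowp_shift) from the library's Types.h, an output stage descriptor is populated like this sketch (all values illustrative):

GEMMLowpOutputStageInfo info;
info.type                = GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT;
info.gemmlowp_offset     = 10;         // zero point added after the shift
info.gemmlowp_multiplier = 1073741824; // Q31 fixed-point multiplier (0.5 here)
info.gemmlowp_shift      = 1;          // right shift applied after the multiply
info.gemmlowp_min_bound  = 0;          // saturate before converting to QASYMM8
info.gemmlowp_max_bound  = 255;
info.output_data_type    = DataType::QASYMM8;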
NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointFixture: alias for GEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointValidationFixture<Tensor, Accessor, NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPoint>.
Tensor::info() (ITensorInfo *info() const override): interface implemented by the child class to return the tensor's metadata.
DataType::S32: 1 channel, 1 S32 per channel.
ITensorInfo::is_resizable() (virtual bool is_resizable() const = 0): flag indicating whether the size of the tensor can be changed.
QuantizationInfo: quantization information.
DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
    framework::dataset::make("InputInfo", {
        TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32),
        TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32),
        TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32),
        TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QASYMM8),
        TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QASYMM8),
        TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32),
        TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QSYMM16),
        TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QSYMM16),
        TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QSYMM16),
    }),
    framework::dataset::make("OutputInfo", {
        TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F16),
        TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32),
        TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32),
        TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QASYMM8),
        TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QASYMM8),
        TensorInfo(TensorShape(30U, 11U, 2U), 1, DataType::F32),
        TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QSYMM16, QuantizationInfo(1.f/32768.f, 0)),
        TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QSYMM16, QuantizationInfo(1.f/32768.f, 0)),
        TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QSYMM16, QuantizationInfo(1.f/32768.f, 0)),
    })),
    framework::dataset::make("ActivationInfo", {
        ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
        ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
        ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
        ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU),
        ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::TANH),
        ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
        ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::TANH),
        ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LOGISTIC),
        ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::SQRT),
    })),
    framework::dataset::make("Expected", { false, true, true, true, false, false, true, true, false })),
    input_info, output_info, act_info, expected)
static Status validate(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output, int min = std::numeric_limits<int32_t>::lowest(), int max = std::numeric_limits<int32_t>::max()): static check that the given info leads to a valid configuration of the corresponding NEGEMMLowpQuantizeDownInt32To...ScaleByFixedPoint function (the same signature and brief appear for each of the Uint8, Int8 and Int16 variants).
Accessor: accessor implementation for Tensor objects.
DatasetMode: possible dataset modes.
quantize_down_int32_to_uint8_scale_relu_cases: dataset constant (const auto).
NEGEMMLowpMatrixMultiplyCoreFixture: alias for GEMMLowpMatrixMultiplyCoreValidationFixture<Tensor, Accessor, NEGEMMLowpMatrixMultiplyCore>.
TEST_SUITE_END(): macro closing the enclosing test suite.
DataType::QASYMM8: quantized, asymmetric fixed-point 8-bit number, unsigned.
GEMMLowpOutputStageInfo: GEMMLowp output stage info.
Tensor: basic implementation of the tensor interface.
TEST_SUITE(name): macro opening a named test suite.
ITensorInfo::padding() (virtual PaddingSize padding() const = 0): padding of tensor.
ITensorInfo::set_quantization_info(const QuantizationInfo &) (virtual, returns ITensorInfo &): set the quantization settings (scale and offset) of the tensor.
PaddingSize (typedef BorderSize PaddingSize): container for 2D padding size.
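These accessors are exactly what the checks in the listing use; a compact sketch of the pattern (shape and quantization values illustrative):

Tensor t;
t.allocator()->init(TensorInfo(TensorShape(32U, 13U), 1, DataType::QASYMM8));
t.info()->set_quantization_info(QuantizationInfo(1.f / 255, 10)); // scale, zero point

ARM_COMPUTE_EXPECT(t.info()->is_resizable(), framework::LogLevel::ERRORS);
// After configure(), the padding checks assert that nothing was padded:
validate(t.info()->padding(), PaddingSize());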
GEMMLowpOutputStageType::QUANTIZE_DOWN: quantize using an integer multiplication.
NEGEMMLowpMatrixMultiplyCoreFusedOffsetOutputFixture: alias for GEMMLowpMatrixMultiplyCoreFusedOffsetOutputValidationFixture<Tensor, Accessor, NEGEMMLowpMatrixMultiplyCore>.
NEGEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPointFixture: alias for GEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPointValidationFixture<Tensor, Accessor, NEGEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPoint>.
NEGEMMLowpOutputStage::configure(const ITensor *input, const ITensor *bias, ITensor *output, const GEMMLowpOutputStageInfo &info): initialise the kernel's inputs and output.
NEGEMMLowpOutputStage::validate(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output, const GEMMLowpOutputStageInfo &info): static check that the given info leads to a valid configuration of NEGEMMLowpOutputStage.
quantize_down_int32_to_int16_scale_by_fixedpoint_multgreat1_cases: dataset constant (const auto).
NEGEMMLowpOutputStage: basic function to execute GEMMLowpQuantizeDown kernels on Neon.
NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointFixture: alias for GEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointValidationFixture<Tensor, Accessor, NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPoint>.
TensorInfo: store the tensor's metadata.
NEGEMMLowpMatrixMultiplyCore::validate(const ITensorInfo *a, const ITensorInfo *b, const ITensorInfo *c, const ITensorInfo *output, const GEMMInfo &gemm_info = GEMMInfo()): static check that the given info leads to a valid configuration of NEGEMMLowpMatrixMultiplyCore.
concat(T &&dataset1, U &&dataset2): helper function to create a JoinDataset.
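Given that configure()/validate() pair, wiring the output stage onto the S32 accumulators might look like the following sketch (shapes and stage parameters illustrative; see the GEMMLowpOutputStageInfo sketch earlier):

Tensor acc, dst;
acc.allocator()->init(TensorInfo(TensorShape(32U, 16U), 1, DataType::S32));
dst.allocator()->init(TensorInfo(TensorShape(32U, 16U), 1, DataType::QASYMM8));

GEMMLowpOutputStageInfo info;
info.type             = GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT;
info.output_data_type = DataType::QASYMM8;
// ... multiplier, shift and bounds as sketched earlier ...

NEGEMMLowpOutputStage output_stage_fn;
ARM_COMPUTE_EXPECT(bool(NEGEMMLowpOutputStage::validate(acc.info(), nullptr, dst.info(), info)),
                   framework::LogLevel::ERRORS);
output_stage_fn.configure(&acc, nullptr, &dst, info); // bias omitted
acc.allocator()->allocate();
dst.allocator()->allocate();
output_stage_fn.run();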
TEST_CASE(FusedActivation, framework::DatasetMode::ALL): validates fused activation; the expected behaviours are enumerated in the test body (elided here).
DataType::QASYMM8_SIGNED: quantized, asymmetric fixed-point 8-bit number, signed.
GEMMLowpOutputStageInfo::gemmlowp_min_bound (int32_t): GEMMLowp min value used to saturate down the output result before converting back to QASYMM8.
GEMMLowpOutputStageInfo::output_data_type (DataType): output tensor data type to use if the output is not initialized.
DataType: available data types.
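The quantized types in this list all follow the affine mapping real = scale * (q - offset), with offset zero for the symmetric ones. A scalar illustration for QASYMM8:

#include <algorithm>
#include <cmath>
#include <cstdint>

// Affine quantization for QASYMM8: q = clamp(round(real / scale) + offset, 0, 255).
uint8_t quantize_qasymm8(float real, float scale, int32_t offset)
{
    const int32_t q = static_cast<int32_t>(std::lround(real / scale)) + offset;
    return static_cast<uint8_t>(std::min(255, std::max(0, q)));
}

// And back: real = scale * (q - offset).
float dequantize_qasymm8(uint8_t q, float scale, int32_t offset)
{
    return scale * (static_cast<int32_t>(q) - offset);
}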
NEGEMMLowpMatrixMultiplyCore: basic function to execute GEMMLowpMatrixMultiplyCore on Neon.
quantize_down_int32_to_uint8_scale_by_fixedpoint_relu_cases: dataset constant (const auto).
quantize_down_int32_to_int16_scale_by_fixedpoint_relu_cases: dataset constant (const auto).
quantize_down_int32_to_int8_scale_relu_cases: dataset constant (const auto).