#include "tests/datasets/GEMMLowpFusedOffsetOutputDataset.h"
#include "tests/datasets/LargeGEMMLowpDataset.h"
#include "tests/datasets/ShapeDatasets.h"
#include "tests/datasets/SmallGEMMLowpDataset.h"
#include "tests/validation/fixtures/GEMMLowpFixture.h"
...
               shape_a, shape_b, shape_c, a_offset, b_offset)
NEGEMMLowpMatrixMultiplyCore gemmlowp_mm;
gemmlowp_mm.configure(&a, &b, nullptr, &c);
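For orientation, a minimal stand-alone sketch of the same function-level API follows. The shapes, quantisation parameters and the lhs/rhs/acc names are illustrative assumptions, not values taken from this test:

    #include "arm_compute/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.h"
    #include "arm_compute/runtime/Tensor.h"

    using namespace arm_compute;

    int main()
    {
        // Hypothetical shapes and quantisation, chosen only to make the sketch self-contained.
        Tensor lhs, rhs, acc;
        lhs.allocator()->init(TensorInfo(TensorShape(32U, 16U), 1, DataType::QASYMM8, QuantizationInfo(0.5f, 10)));
        rhs.allocator()->init(TensorInfo(TensorShape(8U, 32U), 1, DataType::QASYMM8, QuantizationInfo(0.25f, 3)));
        acc.allocator()->init(TensorInfo(TensorShape(8U, 16U), 1, DataType::S32)); // raw GEMMLowp result is S32

        NEGEMMLowpMatrixMultiplyCore mm;
        mm.configure(&lhs, &rhs, nullptr, &acc); // bias omitted, as in the fragment above

        lhs.allocator()->allocate();
        rhs.allocator()->allocate();
        acc.allocator()->allocate();
        // ... fill lhs and rhs with quantised input data ...
        mm.run();
        return 0;
    }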
...
        &b_info.clone()->set_is_resizable(false),
        ...
        &output_info.clone()->set_is_resizable(false));
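The lines above are the tail of a call to the static NEGEMMLowpMatrixMultiplyCore::validate() overload used by the Validate DATA_TEST_CASE. A plausible reconstruction of the full pattern is sketched below; the elided leading arguments and the nullptr bias are assumptions (a_info, b_info, output_info and expected come from the dataset driving the test case):

    const Status status = NEGEMMLowpMatrixMultiplyCore::validate(&a_info.clone()->set_is_resizable(false),
                                                                  &b_info.clone()->set_is_resizable(false),
                                                                  nullptr, // bias not exercised here (assumption)
                                                                  &output_info.clone()->set_is_resizable(false));
    ARM_COMPUTE_EXPECT(bool(status) == expected, framework::LogLevel::ERRORS);

The clone()->set_is_resizable(false) calls make the copied infos behave like already-allocated tensors, so validate() checks metadata only and never touches memory.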
auto gemm = std::make_unique<cpu::CpuGemmLowpMatrixMultiplyCore>();
...
gemm->configure(&a_info, &b_info, nullptr, &dst_info, gemm_info);
auto a   = create_tensor<Tensor>(a_info);
auto b   = create_tensor<Tensor>(b_info);
auto dst = create_tensor<Tensor>(dst_info);
a.allocator()->allocate();
b.allocator()->allocate();
dst.allocator()->allocate();
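// The run_pack and prep_pack consumed by manage_workspace() below are not part of this
// excerpt. With the operator-level API the configured CpuGemmLowpMatrixMultiplyCore has
// only ever seen ITensorInfo metadata, so the concrete tensors are handed over at run
// time in ITensorPack objects keyed by their role. A plausible sketch (the exact keys
// are an assumption, not taken from the excerpt):
ITensorPack run_pack  = { { TensorType::ACL_SRC_0, &a },
                          { TensorType::ACL_SRC_1, &b },
                          { TensorType::ACL_DST,   &dst } };
ITensorPack prep_pack = { { TensorType::ACL_SRC_1, &b } }; // only the constant RHS is needed for preparation
MemoryGroup mg{};                                          // memory group used by manage_workspace() below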
auto ws = manage_workspace<Tensor>(gemm->workspace(), mg, run_pack, prep_pack);
auto run_conv = [&]() -> Tensor
{
    auto dst = create_tensor<Tensor>(dst_info);
    ...
    gemm->prepare(prep_pack);
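    // (Sketch of the elided remainder of the lambda; these exact lines are an assumption.
    //  prepare() does the one-off work on the constant RHS, then the operator is run on
    //  the tensors bound in the pack and the freshly created output is returned.)
    run_pack.add_tensor(TensorType::ACL_DST, &dst); // point the pack's output at this call's fresh tensor
    gemm->run(run_pack);
    return dst;
};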
auto result_0 = run_conv();
auto result_1 = run_conv();
for(size_t i = 0; i < result_0.info()->tensor_shape().total_size(); ++i)
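{
    // (Sketch of the elided loop body: the two runs are expected to produce
    //  byte-identical results; the exact comparison is an assumption. The
    //  function-level test below ends with the same check.)
    ARM_COMPUTE_EXPECT(((uint8_t *)result_0.buffer())[i] == ((uint8_t *)result_1.buffer())[i],
                       framework::LogLevel::ERRORS);
}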
auto gemm = std::make_unique<NEGEMMLowpMatrixMultiplyCore>();
auto run_conv = [&]()
{
    auto a   = create_tensor<Tensor>(a_info);
    auto b   = create_tensor<Tensor>(b_info);
    auto dst = create_tensor<Tensor>(dst_info);
    ...
    a.allocator()->allocate();
    b.allocator()->allocate();
    dst.allocator()->allocate();
...
auto result_0 = run_conv();
auto result_1 = run_conv();
for(size_t i = 0; i < result_0.info()->tensor_shape().total_size(); ++i)