Compute Library
 21.02
LSTMLayerQuantized.cpp File Reference
#include "arm_compute/runtime/CL/functions/CLLSTMLayerQuantized.h"
#include "tests/CL/CLAccessor.h"
#include "tests/PaddingCalculator.h"
#include "tests/Utils.h"
#include "tests/datasets/LSTMLayerDataset.h"
#include "tests/framework/Asserts.h"
#include "tests/framework/Macros.h"
#include "tests/framework/datasets/Datasets.h"
#include "tests/validation/Validation.h"
#include <vector>

Go to the source code of this file.

Namespaces

 arm_compute
 Copyright (c) 2017-2021 Arm Limited.
 
 arm_compute::test
 
 arm_compute::test::validation
 

Functions

 TEST_CASE (RunSmall, framework::DatasetMode::PRECOMMIT)
 
 TEST_CASE (RunLarge, framework::DatasetMode::NIGHTLY)
 
 TEST_SUITE_END ()
 
QuantizationInfo qasymm (1.f/128.f, 128)
 
QuantizationInfo qweights (1.f/16.f, 16)
 
QuantizationInfo qsymm_3 (8.f/32768.f, 0)
 
QuantizationInfo qsymm_4 (16.f/32768.f, 0)
 
input allocator () -> allocate()
 
 fill_tensor (input_to_input_weights, std::vector< uint8_t >{ 122, 130, 124, 134, 120, 122, 134, 134 })
 
 fill_tensor (input_to_forget_weights, std::vector< uint8_t > { 204, 193, 148, 59, 113, 17, 66, 197 })
 
 fill_tensor (input_to_cell_weights, std::vector< uint8_t > { 172, 101, 184, 209, 165, 82, 108, 209 })
 
 fill_tensor (input_to_output_weights, std::vector< uint8_t > { 203, 244, 219, 114, 130, 16, 163, 222 })
 
 fill_tensor (recurrent_to_input_weights, std::vector< uint8_t > { 162, 168, 7, 95, 91, 155, 108, 216, 255, 100, 48, 188, 58, 37, 186, 147 })
 
 fill_tensor (recurrent_to_forget_weights, std::vector< uint8_t > { 46, 58, 47, 170, 246, 96, 12, 99, 68, 23, 186, 161, 237, 164, 89, 6 })
 
 fill_tensor (recurrent_to_cell_weights, std::vector< uint8_t > { 234, 99, 71, 206, 205, 159, 64, 253, 191, 148, 116, 8, 209, 136, 59, 138 })
 
 fill_tensor (recurrent_to_output_weights, std::vector< uint8_t > { 23, 241, 137, 36, 206, 5, 227, 56, 254, 176, 231, 47, 18, 201, 161, 11 })
 
 fill_tensor (input_gate_bias, std::vector< int > {-103038, 30525, 115255, -38154 })
 
 fill_tensor (forget_gate_bias, std::vector< int > { -23428, 126970, 116806, 46307 })
 
 fill_tensor (cell_gate_bias, std::vector< int > { 128006, 69949, -42808, 42568 })
 
 fill_tensor (output_gate_bias, std::vector< int > { -67066, -53607, 47233, 7300 })
 
 fill_tensor (output_state, std::vector< uint8_t > { 128, 128, 128, 128, 128, 128, 128, 128 })
 
 fill_tensor (cell_state, std::vector< int16_t > { 0, 0, 0, 0, 0, 0, 0, 0 })
 
 fill_tensor (input, std::vector< uint8_t > { 106, 193, 155, 150 })
 
 fill_tensor (expected_output, std::vector< uint8_t > { 128, 128, 31, 128, 128, 128, 31, 128 })
 
lstmq run ()
 
 validate (CLAccessor(output_state), expected_output)
 
 fill_tensor (expected_output, std::vector< uint8_t > { 128, 128, 5, 128, 128, 128, 5, 128 })
 
 fill_tensor (expected_output, std::vector< uint8_t > { 128, 128, 1, 128, 128, 128, 1, 128, })
 

Variables

const int input_size = 2
 
const int output_size = 4
 
TensorShape input_shape { input_size, batch_size }
 Shape of the LSTM input tensor: input_size columns by batch_size rows. More...
 
TensorShape input_weights_shape { input_size, output_size }
 
TensorShape recurrent_weights_shape { output_size, output_size }
 
TensorShape output_shape { output_size, batch_size}
 
TensorShape bias_shape { output_size }
 
auto input_to_input_weights = create_tensor<CLTensor>(input_weights_shape, DataType::QASYMM8, 1, qweights)
 
auto input_to_forget_weights = create_tensor<CLTensor>(input_weights_shape, DataType::QASYMM8, 1, qweights)
 
auto input_to_cell_weights = create_tensor<CLTensor>(input_weights_shape, DataType::QASYMM8, 1, qweights)
 
auto input_to_output_weights = create_tensor<CLTensor>(input_weights_shape, DataType::QASYMM8, 1, qweights)
 
auto recurrent_to_input_weights = create_tensor<CLTensor>(recurrent_weights_shape, DataType::QASYMM8, 1, qweights)
 
auto recurrent_to_forget_weights = create_tensor<CLTensor>(recurrent_weights_shape, DataType::QASYMM8, 1, qweights)
 
auto recurrent_to_cell_weights = create_tensor<CLTensor>(recurrent_weights_shape, DataType::QASYMM8, 1, qweights)
 
auto recurrent_to_output_weights = create_tensor<CLTensor>(recurrent_weights_shape, DataType::QASYMM8, 1, qweights)
 
auto input_gate_bias = create_tensor<CLTensor>(bias_shape, DataType::S32)
 
auto forget_gate_bias = create_tensor<CLTensor>(bias_shape, DataType::S32)
 
auto cell_gate_bias = create_tensor<CLTensor>(bias_shape, DataType::S32)
 
auto output_gate_bias = create_tensor<CLTensor>(bias_shape, DataType::S32)
 
auto input = create_tensor<CLTensor>(input_shape, DataType::QASYMM8, 1, qasymm)
 
auto output_state = create_tensor<CLTensor>(output_shape, DataType::QASYMM8, 1, qasymm)
 
auto cell_state = create_tensor<CLTensor>(output_shape, DataType::QSYMM16, 1, qsymm_4)
 
CLLSTMLayerQuantized lstmq
 
SimpleTensor< uint8_t > expected_output (output_shape, DataType::QASYMM8, 1, qasymm)