// Validate test: a 10-way zip of (InputInfo, WeightInfo, BiasInfo, OutputInfo)
// TensorInfo tuples feeding a DATA_TEST_CASE. Most entries use the file's
// "correct_*" shape/data-type constants; visibly deviating entries are:
//   - input:  DataType::F16 (entry 1), TensorShape(15U, 2U, 2U) (entry 4)
//   - weight: DataType::F16 (entry 2), TensorShape(15U, 2U) (entry 5),
//             TensorShape(14U) (entry 7)
//   - bias:   DataType::QSYMM16 (entry 3), TensorShape(15U, 2U) (entry 6),
//             TensorShape(14U) (entry 8)
//   - output: TensorShape(15, 3) (entry 9), DataType::S32 (entry 10)
// NOTE(review): presumably each deviating column is one invalid configuration
// that the layer's validate() is expected to reject, with the expected
// true/false pattern supplied by the test-case body — that body is not in this
// chunk, so the pass/fail expectations cannot be confirmed here. TODO: verify
// against the case body and the definitions of the correct_* constants.
| DATA_TEST_CASE (Validate, framework::DatasetMode::ALL, zip(zip(zip(framework::dataset::make("InputInfo", { TensorInfo(correct_input_shape, tensor_num_channel, DataType::F16), TensorInfo(correct_input_shape, tensor_num_channel, correct_input_dt), TensorInfo(correct_input_shape, tensor_num_channel, correct_input_dt), TensorInfo(TensorShape(15U, 2U, 2U), tensor_num_channel, correct_input_dt), TensorInfo(correct_input_shape, tensor_num_channel, correct_input_dt), TensorInfo(correct_input_shape, tensor_num_channel, correct_input_dt), TensorInfo(correct_input_shape, tensor_num_channel, correct_input_dt), TensorInfo(correct_input_shape, tensor_num_channel, correct_input_dt), TensorInfo(correct_input_shape, tensor_num_channel, correct_input_dt), TensorInfo(correct_input_shape, tensor_num_channel, correct_input_dt), }), framework::dataset::make("WeightInfo", { TensorInfo(correct_weight_shape, tensor_num_channel, correct_weight_dt), TensorInfo(correct_weight_shape, tensor_num_channel, DataType::F16), TensorInfo(correct_weight_shape, tensor_num_channel, correct_weight_dt), TensorInfo(correct_weight_shape, tensor_num_channel, correct_weight_dt), TensorInfo(TensorShape(15U, 2U), tensor_num_channel, correct_weight_dt), TensorInfo(correct_weight_shape, tensor_num_channel, correct_weight_dt), TensorInfo(TensorShape(14U), tensor_num_channel, correct_weight_dt), TensorInfo(correct_weight_shape, tensor_num_channel, correct_weight_dt), TensorInfo(correct_weight_shape, tensor_num_channel, correct_weight_dt), TensorInfo(correct_weight_shape, tensor_num_channel, correct_weight_dt), })), framework::dataset::make("BiasInfo", { TensorInfo(correct_bias_shape, tensor_num_channel, correct_bias_dt), TensorInfo(correct_bias_shape, tensor_num_channel, correct_bias_dt), TensorInfo(correct_bias_shape, tensor_num_channel, DataType::QSYMM16), TensorInfo(correct_bias_shape, tensor_num_channel, correct_bias_dt), TensorInfo(correct_bias_shape, tensor_num_channel, correct_bias_dt), 
TensorInfo(TensorShape(15U, 2U), tensor_num_channel, correct_bias_dt), TensorInfo(correct_bias_shape, tensor_num_channel, correct_bias_dt), TensorInfo(TensorShape(14U), tensor_num_channel, correct_bias_dt), TensorInfo(correct_bias_shape, tensor_num_channel, correct_bias_dt), TensorInfo(correct_bias_shape, tensor_num_channel, correct_bias_dt), })), framework::dataset::make("OutputInfo", { TensorInfo(correct_output_shape, tensor_num_channel, correct_output_dt), TensorInfo(correct_output_shape, tensor_num_channel, correct_output_dt), TensorInfo(correct_output_shape, tensor_num_channel, correct_output_dt), TensorInfo(correct_output_shape, tensor_num_channel, correct_output_dt), TensorInfo(correct_output_shape, tensor_num_channel, correct_output_dt), TensorInfo(correct_output_shape, tensor_num_channel, correct_output_dt), TensorInfo(correct_output_shape, tensor_num_channel, correct_output_dt), TensorInfo(correct_output_shape, tensor_num_channel, correct_output_dt), TensorInfo(TensorShape(15, 3), tensor_num_channel, correct_output_dt), TensorInfo(correct_output_shape, tensor_num_channel, DataType::S32), })), input_info, weight_info, bias_info, output_info) |
// RandomValue1D: FIXTURE_DATA_TEST_CASE driving
// NEQLSTMLayerNormalizationFixture<int16_t> with the concat of three datasets.
// Each of the three is combine(zip(InputShape, WeightShape, BiasShape),
// DataType::QSYMM16, WeightQuantizationInfo in { 1./8192, 8192 }), built from
// QLSTMLayerNormShapeDataSet< qsymm16_per_vector, 1, K > with K = 0, 1, 2.
// NOTE(review): the second template argument is 1 in all three shape datasets
// here (vs 3 in RandomValue2D below), which presumably selects single-batch
// (1-D) input shapes — confirm against QLSTMLayerNormShapeDataSet's
// definition. The fixture body/validation is outside this chunk.
| FIXTURE_DATA_TEST_CASE (RandomValue1D, NEQLSTMLayerNormalizationFixture< int16_t >, framework::DatasetMode::ALL, concat(concat(combine(combine(zip(zip(QLSTMLayerNormShapeDataSet< qsymm16_per_vector, 1, 0 >("InputShape"), QLSTMLayerNormShapeDataSet< qsymm16_per_vector, 1, 0 >("WeightShape")), QLSTMLayerNormShapeDataSet< qsymm16_per_vector, 1, 0 >("BiasShape")), framework::dataset::make("DataType", DataType::QSYMM16)), framework::dataset::make("WeightQuantizationInfo", { QuantizationInfo(1./8192), QuantizationInfo(8192) })), combine(combine(zip(zip(QLSTMLayerNormShapeDataSet< qsymm16_per_vector, 1, 1 >("InputShape"), QLSTMLayerNormShapeDataSet< qsymm16_per_vector, 1, 1 >("WeightShape")), QLSTMLayerNormShapeDataSet< qsymm16_per_vector, 1, 1 >("BiasShape")), framework::dataset::make("DataType", DataType::QSYMM16)), framework::dataset::make("WeightQuantizationInfo", { QuantizationInfo(1./8192), QuantizationInfo(8192) }))), combine(combine(zip(zip(QLSTMLayerNormShapeDataSet< qsymm16_per_vector, 1, 2 >("InputShape"), QLSTMLayerNormShapeDataSet< qsymm16_per_vector, 1, 2 >("WeightShape")), QLSTMLayerNormShapeDataSet< qsymm16_per_vector, 1, 2 >("BiasShape")), framework::dataset::make("DataType", DataType::QSYMM16)), framework::dataset::make("WeightQuantizationInfo", { QuantizationInfo(1./8192), QuantizationInfo(8192) })))) |
// RandomValue2D: identical dataset structure to RandomValue1D (concat of three
// combine()d QSYMM16 datasets over K = 0, 1, 2, with WeightQuantizationInfo in
// { 1./8192, 8192 }), except the InputShape datasets use
// QLSTMLayerNormShapeDataSet< qsymm16_per_vector, 3, K > while WeightShape and
// BiasShape keep the second template argument at 1.
// NOTE(review): the 3 presumably yields multi-batch (2-D) input shapes sharing
// per-vector weights/bias — confirm against QLSTMLayerNormShapeDataSet's
// definition. The fixture body/validation is outside this chunk.
| FIXTURE_DATA_TEST_CASE (RandomValue2D, NEQLSTMLayerNormalizationFixture< int16_t >, framework::DatasetMode::ALL, concat(concat(combine(combine(zip(zip(QLSTMLayerNormShapeDataSet< qsymm16_per_vector, 3, 0 >("InputShape"), QLSTMLayerNormShapeDataSet< qsymm16_per_vector, 1, 0 >("WeightShape")), QLSTMLayerNormShapeDataSet< qsymm16_per_vector, 1, 0 >("BiasShape")), framework::dataset::make("DataType", DataType::QSYMM16)), framework::dataset::make("WeightQuantizationInfo", { QuantizationInfo(1./8192), QuantizationInfo(8192) })), combine(combine(zip(zip(QLSTMLayerNormShapeDataSet< qsymm16_per_vector, 3, 1 >("InputShape"), QLSTMLayerNormShapeDataSet< qsymm16_per_vector, 1, 1 >("WeightShape")), QLSTMLayerNormShapeDataSet< qsymm16_per_vector, 1, 1 >("BiasShape")), framework::dataset::make("DataType", DataType::QSYMM16)), framework::dataset::make("WeightQuantizationInfo", { QuantizationInfo(1./8192), QuantizationInfo(8192) }))), combine(combine(zip(zip(QLSTMLayerNormShapeDataSet< qsymm16_per_vector, 3, 2 >("InputShape"), QLSTMLayerNormShapeDataSet< qsymm16_per_vector, 1, 2 >("WeightShape")), QLSTMLayerNormShapeDataSet< qsymm16_per_vector, 1, 2 >("BiasShape")), framework::dataset::make("DataType", DataType::QSYMM16)), framework::dataset::make("WeightQuantizationInfo", { QuantizationInfo(1./8192), QuantizationInfo(8192) })))) |