Compute Library 22.08 - QLSTMLayerNormalization.cpp
/*
 * Copyright (c) 2020 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "src/core/CL/kernels/CLQLSTMLayerNormalizationKernel.h" // kernel under test (header path assumed from the 22.08 source layout)

#include "tests/CL/CLAccessor.h"
#include "tests/CL/Helper.h"
#include "tests/datasets/ShapeDatasets.h"
#include "tests/framework/Asserts.h"           // ARM_COMPUTE_EXPECT
#include "tests/framework/Macros.h"
#include "tests/framework/datasets/Datasets.h" // framework::dataset::make, zip, combine, concat
#include "tests/validation/Validation.h"       // validate(), AbsoluteTolerance
#include "tests/validation/fixtures/QLSTMLayerNormalizationFixture.h"

namespace arm_compute
{
namespace test
{
namespace validation
{
namespace
{
constexpr AbsoluteTolerance<int16_t> tolerance_s16(0); /**< Tolerance value for comparing reference's output against implementation's output for QSYMM16 data types */
constexpr uint32_t                   vector_size_byte = 16;

using test::datasets::ShapeDataset;
using CLQLSTMLayerNormalization = CLSynthetizeFunction<CLQLSTMLayerNormalizationKernel>;
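// CLSynthetizeFunction (from tests/CL/Helper.h) wraps the bare CLQLSTMLayerNormalizationKernel in a
// minimal function-style interface, so the kernel can be configured, validated and run directly by
// the fixtures and test cases below without going through a full runtime function.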
template <uint32_t num_elements_per_iter, uint32_t num_batches, uint32_t num_iteration>
class QLSTMLayerNormShapeDataSet : public ShapeDataset
{
    static constexpr auto boundary_minus_one = num_elements_per_iter * num_iteration - 1;
    static constexpr auto boundary           = num_elements_per_iter * num_iteration;
    static constexpr auto boundary_plus_one  = num_elements_per_iter * num_iteration + 1;

public:
    QLSTMLayerNormShapeDataSet(std::string name)
        : ShapeDataset(name,
                       {
                           TensorShape{ boundary_minus_one, num_batches },
                           TensorShape{ boundary, num_batches },
                           TensorShape{ boundary_plus_one, num_batches }
                       })
    {
    }
};

template <uint32_t num_elements_per_iter, uint32_t num_batches>
class QLSTMLayerNormShapeDataSet<num_elements_per_iter, num_batches, 0> : public ShapeDataset
{
public:
    QLSTMLayerNormShapeDataSet(std::string name)
        : ShapeDataset(name,
                       {
                           TensorShape{ 1, num_batches },
                           TensorShape{ 2, num_batches }
                       })
    {
    }
};
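// Worked example of the shapes generated above: with num_elements_per_iter = 8 (one 128-bit vector
// of QSYMM16 values) and num_iteration = 2, the primary template yields widths 15, 16 and 17, i.e.
// one element below, exactly at, and one element above the two-iteration boundary. The
// num_iteration = 0 specialisation instead yields widths 1 and 2, covering the case where less
// than one full vector of elements has to be processed.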
} // namespace
TEST_SUITE(CL)
TEST_SUITE(QLSTMLayerNormalization)

static const TensorShape correct_input_shape{ TensorShape(15U, 2U) };
static const TensorShape correct_weight_shape{ TensorShape(15U) };
static const TensorShape correct_bias_shape{ TensorShape(15U) };
static const DataType    correct_input_dt{ DataType::QSYMM16 };
static const DataType    correct_weight_dt{ DataType::QSYMM16 };
static const DataType    correct_bias_dt{ DataType::S32 };
static const uint32_t    tensor_num_channel{ 1 };

// *INDENT-OFF*
// clang-format off

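// Every entry in the dataset below is a deliberately invalid configuration: each row breaks exactly
// one of the kernel's constraints (data type, dimensionality or shape mismatch), as noted in the
// per-row comments, so validate() is expected to reject every combination.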
DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, // test-case name and dataset mode assumed
    zip(zip(
            framework::dataset::make("InputInfo", {
                TensorInfo(correct_input_shape, tensor_num_channel, DataType::F16),         // input supports only QSYMM16
                TensorInfo(correct_input_shape, tensor_num_channel, correct_input_dt),      // weight supports only QSYMM16
                TensorInfo(correct_input_shape, tensor_num_channel, correct_input_dt),      // bias supports only S32
                TensorInfo(TensorShape(15U, 2U, 2U), tensor_num_channel, correct_input_dt), // input supports only up to 2D
                TensorInfo(correct_input_shape, tensor_num_channel, correct_input_dt),      // weight supports only up to 1D
                TensorInfo(correct_input_shape, tensor_num_channel, correct_input_dt),      // bias supports only up to 1D
                TensorInfo(correct_input_shape, tensor_num_channel, correct_input_dt),      // input_shape[0] != weight_shape[0] should fail
                TensorInfo(correct_input_shape, tensor_num_channel, correct_input_dt),      // weight_shape[0] != bias_shape[0] should fail
            }),
            framework::dataset::make("WeightInfo", {
                TensorInfo(correct_weight_shape, tensor_num_channel, correct_weight_dt),
                TensorInfo(correct_weight_shape, tensor_num_channel, DataType::F16),
                TensorInfo(correct_weight_shape, tensor_num_channel, correct_weight_dt),
                TensorInfo(correct_weight_shape, tensor_num_channel, correct_weight_dt),
                TensorInfo(TensorShape(15U, 2U), tensor_num_channel, correct_weight_dt),
                TensorInfo(correct_weight_shape, tensor_num_channel, correct_weight_dt),
                TensorInfo(TensorShape(14U), tensor_num_channel, correct_weight_dt),
                TensorInfo(correct_weight_shape, tensor_num_channel, correct_weight_dt),
            })
        ),
        framework::dataset::make("BiasInfo", {
            TensorInfo(correct_bias_shape, tensor_num_channel, correct_bias_dt),
            TensorInfo(correct_bias_shape, tensor_num_channel, correct_bias_dt),
            TensorInfo(correct_bias_shape, tensor_num_channel, DataType::QSYMM16),
            TensorInfo(correct_bias_shape, tensor_num_channel, correct_bias_dt),
            TensorInfo(correct_bias_shape, tensor_num_channel, correct_bias_dt),
            TensorInfo(TensorShape(15U, 2U), tensor_num_channel, correct_bias_dt),
            TensorInfo(correct_bias_shape, tensor_num_channel, correct_bias_dt),
            TensorInfo(TensorShape(14U), tensor_num_channel, correct_bias_dt),
        })
    ), input_info, weight_info, bias_info)
{
    TensorInfo   dummy_output{};
    const Status s = CLQLSTMLayerNormalization::validate(&input_info, &dummy_output, &weight_info, &bias_info);
    ARM_COMPUTE_EXPECT(!bool(s), framework::LogLevel::ERRORS); // assumed expectation: every combination above is invalid, so validate() must return an error
}

// clang-format on
// *INDENT-ON*

template <typename T>
using CLQLSTMLayerNormalizationFixture = QLSTMLayerNormalizationValidationFixture<CLTensor, CLAccessor, CLQLSTMLayerNormalization, T>;
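// The fixture (tests/validation/fixtures/QLSTMLayerNormalizationFixture.h) fills the input, weight
// and bias tensors, runs the OpenCL kernel to produce _target and the scalar reference to produce
// _reference; with tolerance_s16 set to 0, the comparisons below require bit-exact agreement.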

TEST_SUITE(Quantized)
TEST_SUITE(QSYMM16)

/** These tests target:
 * - Comparison between the OpenCL kernel and the exact same algorithm implemented as a scalar reference kernel
 * - Input shapes of 1D and 2D whose first dimension covers the boundary values of the 128-bit vector size (0~3 iterations)
 * - Weight and bias 1D shapes with the same size as the first dimension of the input shapes
 * - Quantization scales both greater than and smaller than one
 * - Input values are noted in the fixture
 *
 * What we can't test:
 * - Since the reference kernel uses exactly the same algorithm in the same quantized domain,
 *   it is hard to fully test whether the algorithm accomplishes what it is supposed to.
 * - The algorithm is sensitive to the quantization scale, but it is hard to fully test
 *   that sensitivity for the reason above.
 * - Likewise, it is hard to fully test corner values, because the reference kernel and the
 *   OpenCL kernel share the exact same algorithm.
 */

constexpr uint32_t qsymm16_per_vector = vector_size_byte / sizeof(int16_t);
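// 16 bytes per 128-bit vector divided by sizeof(int16_t) = 2 bytes gives 8 QSYMM16 elements per
// iteration; a sanity check of that arithmetic (added here for illustration):
static_assert(vector_size_byte / sizeof(int16_t) == 8, "a 128-bit vector holds 8 QSYMM16 elements");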

#define QSYMM16_DATASET_ITER(num_input_batch, num_iter)                                                               \
    combine(combine(zip(zip(QLSTMLayerNormShapeDataSet<qsymm16_per_vector, num_input_batch, num_iter>("InputShape"), \
                            QLSTMLayerNormShapeDataSet<qsymm16_per_vector, 1, num_iter>("WeightShape")),             \
                        QLSTMLayerNormShapeDataSet<qsymm16_per_vector, 1, num_iter>("BiasShape")),                   \
                    framework::dataset::make("DataType", DataType::QSYMM16)),                                         \
            framework::dataset::make("InputQuantizationInfo", { QuantizationInfo(1. / 8192), QuantizationInfo(8192) }))

#define QSYMM16_DATASET_1D \
    concat(concat(QSYMM16_DATASET_ITER(1, 0), QSYMM16_DATASET_ITER(1, 1)), QSYMM16_DATASET_ITER(1, 2))

#define QSYMM16_DATASET_2D \
    concat(concat(QSYMM16_DATASET_ITER(3, 0), QSYMM16_DATASET_ITER(3, 1)), QSYMM16_DATASET_ITER(3, 2))
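// Each QSYMM16_DATASET_ITER(num_input_batch, num_iter) instance zips three matching shapes
// (input/weight/bias) around the num_iter vector boundary and crosses them with the two
// quantization scales (1/8192 and 8192): 3 x 2 = 6 configurations for num_iter > 0 and
// 2 x 2 = 4 for num_iter = 0, so each concatenated dataset covers 16 configurations in total.
// The 1D dataset uses a single input batch, the 2D dataset uses 3 batches.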

// Test-case names and dataset modes below are assumed ("RandomValue1D"/"RandomValue2D", DatasetMode::ALL).
FIXTURE_DATA_TEST_CASE(RandomValue1D, CLQLSTMLayerNormalizationFixture<int16_t>, framework::DatasetMode::ALL, QSYMM16_DATASET_1D)
{
    // Validate output
    validate(CLAccessor(_target), _reference, tolerance_s16);
}

FIXTURE_DATA_TEST_CASE(RandomValue2D, CLQLSTMLayerNormalizationFixture<int16_t>, framework::DatasetMode::ALL, QSYMM16_DATASET_2D)
{
    // Validate output
    validate(CLAccessor(_target), _reference, tolerance_s16);
}
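// To run only this suite with the validation binary, a regex filter along these lines can
// typically be used (binary name and --filter option assumed):
//   ./arm_compute_validation --filter='CL/QLSTMLayerNormalization/.*'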

#undef QSYMM16_DATASET_ITER
#undef QSYMM16_DATASET_2D
#undef QSYMM16_DATASET_1D

TEST_SUITE_END() // QSYMM16
TEST_SUITE_END() // Quantized
TEST_SUITE_END() // QLSTMLayerNormalization
TEST_SUITE_END() // CL

} // namespace validation
} // namespace test
} // namespace arm_compute