Arm Compute Library 21.08 — Doxygen source listing of tests/validation/CL/LSTMLayerQuantized.cpp.
1 /*
2  * Copyright (c) 2019-2020 Arm Limited.
3  *
4  * SPDX-License-Identifier: MIT
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a copy
7  * of this software and associated documentation files (the "Software"), to
8  * deal in the Software without restriction, including without limitation the
9  * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10  * sell copies of the Software, and to permit persons to whom the Software is
11  * furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in all
14  * copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22  * SOFTWARE.
23  */
#include "arm_compute/runtime/CL/functions/CLLSTMLayerQuantized.h"

#include "tests/CL/CLAccessor.h"
#include "tests/Utils.h"
#include "tests/datasets/LSTMLayerDataset.h"
#include "tests/framework/Asserts.h"
#include "tests/framework/Macros.h"
#include "tests/framework/datasets/Datasets.h"
#include "tests/validation/Validation.h"

#include <vector>
35 
36 namespace arm_compute
37 {
38 namespace test
39 {
40 namespace validation
41 {
42 namespace
43 {
44 template <typename T>
45 inline void fill_tensor(CLTensor &tensor, const std::vector<T> &v)
46 {
47  tensor.map(true);
48  // Import memory accounting for padding
49  TensorShape t_shape = tensor.info()->tensor_shape();
50  Window window;
51  window.use_tensor_dimensions(t_shape);
52  Iterator out(&tensor, window);
53  execute_window_loop(window, [&](const Coordinates & id)
54  {
55  *reinterpret_cast<T *>(out.ptr()) = v[coord2index(t_shape, id)];
56  },
57  out);
58  tensor.unmap();
59 }
60 
61 template <typename T>
62 inline void fill_tensor(SimpleTensor<T> &tensor, const std::vector<T> &v)
63 {
64  std::memcpy(tensor.data(), v.data(), sizeof(T) * v.size());
65 }
66 
67 } // namespace
68 
69 TEST_SUITE(CL)
70 TEST_SUITE(LSTMLayerQuantized)
71 
72 // *INDENT-OFF*
73 // clang-format off
74 TEST_SUITE(IntegrationTestCase)
75 TEST_SUITE(MultSmallerEq1)
76 TEST_CASE(RunSmall, framework::DatasetMode::PRECOMMIT)
77 {
78  const int batch_size = 2;
79  const int input_size = 2;
80  const int output_size = 4;
81 
82  QuantizationInfo qasymm(1.f / 128.f, 128);
83  QuantizationInfo qweights(1.f / 128.f, 128);
84  QuantizationInfo qsymm_3(8.f / 32768.f, 0);
85  QuantizationInfo qsymm_4(16.f / 32768.f, 0);
86 
87  TensorShape input_shape{ input_size, batch_size };
90  TensorShape output_shape{ output_size, batch_size};
91  TensorShape bias_shape{ output_size };
92 
93  auto input_to_input_weights = create_tensor<CLTensor>(input_weights_shape, DataType::QASYMM8, 1, qweights);
94  auto input_to_forget_weights = create_tensor<CLTensor>(input_weights_shape, DataType::QASYMM8, 1, qweights);
95  auto input_to_cell_weights = create_tensor<CLTensor>(input_weights_shape, DataType::QASYMM8, 1, qweights);
96  auto input_to_output_weights = create_tensor<CLTensor>(input_weights_shape, DataType::QASYMM8, 1, qweights);
101  auto input_gate_bias = create_tensor<CLTensor>(bias_shape, DataType::S32);
102  auto forget_gate_bias = create_tensor<CLTensor>(bias_shape, DataType::S32);
103  auto cell_gate_bias = create_tensor<CLTensor>(bias_shape, DataType::S32);
104  auto output_gate_bias = create_tensor<CLTensor>(bias_shape, DataType::S32);
105 
106  // LSTM input
107  auto input = create_tensor<CLTensor>(input_shape, DataType::QASYMM8, 1, qasymm);
108 
109  // LSTM output state
110  auto output_state = create_tensor<CLTensor>(output_shape, DataType::QASYMM8, 1, qasymm);
111 
112  // LSTM cell state
113  auto cell_state = create_tensor<CLTensor>(output_shape, DataType::QSYMM16, 1, qsymm_4);
114 
116 
117  lstmq.configure(&input, &input_to_input_weights, &input_to_forget_weights, &input_to_cell_weights, &input_to_output_weights,
118  &recurrent_to_input_weights, &recurrent_to_forget_weights, &recurrent_to_cell_weights, &recurrent_to_output_weights,
119  &input_gate_bias, &forget_gate_bias, &cell_gate_bias, &output_gate_bias, &cell_state, &output_state, &cell_state, &output_state);
120 
121  input.allocator()->allocate();
122  input_to_input_weights.allocator()->allocate();
123  input_to_forget_weights.allocator()->allocate();
124  input_to_cell_weights.allocator()->allocate();
125  input_to_output_weights.allocator()->allocate();
126  recurrent_to_input_weights.allocator()->allocate();
127  recurrent_to_forget_weights.allocator()->allocate();
128  recurrent_to_cell_weights.allocator()->allocate();
129  recurrent_to_output_weights.allocator()->allocate();
130  input_gate_bias.allocator()->allocate();
131  forget_gate_bias.allocator()->allocate();
132  cell_gate_bias.allocator()->allocate();
133  output_gate_bias.allocator()->allocate();
134  cell_state.allocator()->allocate();
135  output_state.allocator()->allocate();
136 
137  // Fill weights and biases
138  fill_tensor(input_to_input_weights, std::vector<uint8_t>{ 47, 168,
139  66, 239,
140  6, 42,
141  237, 236 });
142 
143  fill_tensor(input_to_forget_weights, std::vector<uint8_t> { 204, 193,
144  148, 59,
145  113, 17,
146  66, 197 });
147 
148  fill_tensor(input_to_cell_weights, std::vector<uint8_t> { 172, 101,
149  184, 209,
150  165, 82,
151  108, 209 });
152 
153  fill_tensor(input_to_output_weights, std::vector<uint8_t> { 203, 244,
154  219, 114,
155  130, 16,
156  163, 222 });
157 
158  fill_tensor(recurrent_to_input_weights, std::vector<uint8_t> { 162, 168, 7, 95,
159  91, 155, 108, 216,
160  255, 100, 48, 188,
161  58, 37, 186, 147 });
162 
163  fill_tensor(recurrent_to_forget_weights, std::vector<uint8_t> { 46, 58, 47, 170,
164  246, 96, 12, 99,
165  68, 23, 186, 161,
166  237, 164, 89, 6 });
167 
168  fill_tensor(recurrent_to_cell_weights, std::vector<uint8_t> { 234, 99, 71, 206,
169  205, 159, 64, 253,
170  191, 148, 116, 8,
171  209, 136, 59, 138 });
172 
173  fill_tensor(recurrent_to_output_weights, std::vector<uint8_t> { 23, 241, 137, 36,
174  206, 5, 227, 56,
175  254, 176, 231, 47,
176  18, 201, 161, 11 });
177 
178  fill_tensor(input_gate_bias, std::vector<int> {-103038, 30525, 115255, -38154 });
179  fill_tensor(forget_gate_bias, std::vector<int> { -23428, 126970, 116806, 46307 });
180  fill_tensor(cell_gate_bias, std::vector<int> { 128006, 69949, -42808, 42568 });
181  fill_tensor(output_gate_bias, std::vector<int> { -67066, -53607, 47233, 7300 });
182 
183  SimpleTensor<uint8_t> expected_output(output_shape, DataType::QASYMM8, 1, qasymm);
184 
185  // Initialize state
186  fill_tensor(output_state, std::vector<uint8_t> { 128, 128, 128, 128,
187  128, 128, 128, 128 });
188  fill_tensor(cell_state, std::vector<int16_t> { 0, 0, 0, 0,
189  0, 0, 0, 0 });
190 
191  // First input
192  fill_tensor(input, std::vector<uint8_t> { 106, 193,
193  155, 150 });
194 
195  fill_tensor(expected_output, std::vector<uint8_t> { 128, 130, 36, 134,
196  128, 131, 35, 133 });
197 
198  lstmq.run();
199  validate(CLAccessor(output_state), expected_output);
200 
201  // Second input
202  fill_tensor(expected_output, std::vector<uint8_t> { 128, 129, 12, 137,
203  128, 131, 10, 136 });
204  lstmq.run();
205  validate(CLAccessor(output_state), expected_output);
206 
207  // Third input
208  fill_tensor(expected_output, std::vector<uint8_t> { 128, 129, 8, 140,
209  128, 130, 6, 138 });
210  lstmq.run();
211  validate(CLAccessor(output_state), expected_output);
212 }
213 
215 {
216  const int batch_size = 16;
217  const int input_size = 8;
218  const int output_size = 8;
219 
220 
221  QuantizationInfo qasymm(1.f / 128.f, 128);
222  QuantizationInfo qweights(1.f / 128.f, 128);
223  QuantizationInfo qsymm_3(8.f / 32768.f, 0);
224  QuantizationInfo qsymm_4(16.f / 32768.f, 0);
225 
226  TensorShape input_shape{ input_size, batch_size };
229  TensorShape output_shape{ output_size, batch_size};
230  TensorShape bias_shape{ output_size };
231 
232  auto input_to_input_weights = create_tensor<CLTensor>(input_weights_shape, DataType::QASYMM8, 1, qweights);
233  auto input_to_forget_weights = create_tensor<CLTensor>(input_weights_shape, DataType::QASYMM8, 1, qweights);
234  auto input_to_cell_weights = create_tensor<CLTensor>(input_weights_shape, DataType::QASYMM8, 1, qweights);
235  auto input_to_output_weights = create_tensor<CLTensor>(input_weights_shape, DataType::QASYMM8, 1, qweights);
240  auto input_gate_bias = create_tensor<CLTensor>(bias_shape, DataType::S32);
241  auto forget_gate_bias = create_tensor<CLTensor>(bias_shape, DataType::S32);
242  auto cell_gate_bias = create_tensor<CLTensor>(bias_shape, DataType::S32);
243  auto output_gate_bias = create_tensor<CLTensor>(bias_shape, DataType::S32);
244 
245  // LSTM input
246  auto input = create_tensor<CLTensor>(input_shape, DataType::QASYMM8, 1, qasymm);
247 
248  // LSTM output state
249  auto output_state = create_tensor<CLTensor>(output_shape, DataType::QASYMM8, 1, qasymm);
250 
251  // LSTM cell state
252  auto cell_state = create_tensor<CLTensor>(output_shape, DataType::QSYMM16, 1, qsymm_4);
253 
255 
256  lstmq.configure(&input, &input_to_input_weights, &input_to_forget_weights, &input_to_cell_weights, &input_to_output_weights,
257  &recurrent_to_input_weights, &recurrent_to_forget_weights, &recurrent_to_cell_weights, &recurrent_to_output_weights,
258  &input_gate_bias, &forget_gate_bias, &cell_gate_bias, &output_gate_bias, &cell_state, &output_state, &cell_state, &output_state);
259 
260  input.allocator()->allocate();
261  input_to_input_weights.allocator()->allocate();
262  input_to_forget_weights.allocator()->allocate();
263  input_to_cell_weights.allocator()->allocate();
264  input_to_output_weights.allocator()->allocate();
265  recurrent_to_input_weights.allocator()->allocate();
266  recurrent_to_forget_weights.allocator()->allocate();
267  recurrent_to_cell_weights.allocator()->allocate();
268  recurrent_to_output_weights.allocator()->allocate();
269  input_gate_bias.allocator()->allocate();
270  forget_gate_bias.allocator()->allocate();
271  cell_gate_bias.allocator()->allocate();
272  output_gate_bias.allocator()->allocate();
273  cell_state.allocator()->allocate();
274  output_state.allocator()->allocate();
275 
276  // Fill weights and biases
277  fill_tensor(input_to_input_weights, std::vector<uint8_t>{ 141, 89, 200, 180, 46, 50, 87, 128,
278  149, 227, 177, 187, 212, 229, 54, 111,
279  131, 116, 3, 58, 196, 26, 131, 255,
280  22, 106, 216, 69, 239, 12, 232, 207,
281  184, 56, 236, 172, 28, 143, 161, 124,
282  255, 33, 197, 122, 47, 197, 26, 229,
283  91, 79, 11, 160, 26, 80, 100, 36,
284  248, 186, 97, 61, 125, 46, 14, 100, });
285 
286  fill_tensor(input_to_forget_weights, std::vector<uint8_t> { 237, 165, 141, 249, 72, 116, 36 , 115,
287  234, 213, 85, 84, 59, 62, 150, 246,
288  182, 102, 158, 214, 182, 183, 94, 11,
289  158, 192, 92, 189, 160, 219, 206, 249,
290  88, 213, 193, 244, 151, 72, 129, 49,
291  239, 83, 106, 9, 169, 187, 125, 171,
292  32, 141, 126, 92, 13, 36, 224, 150,
293  187, 250, 178, 169, 89, 214, 91, 173 });
294 
295  fill_tensor(input_to_cell_weights, std::vector<uint8_t> { 93, 103, 226, 139, 185, 252, 129, 171,
296  159, 32, 25, 175, 224, 183, 165, 35,
297  207, 69, 238, 228, 149, 214, 79, 6,
298  5, 66, 102, 14, 19, 111, 36, 143,
299  22, 85, 13, 78, 236, 121, 122, 77,
300  249, 39, 88, 12, 205, 143, 93, 240,
301  167, 89, 188, 50, 73, 69, 201, 251,
302  59, 32, 203, 184, 139, 191, 199, 74});
303 
304  fill_tensor(input_to_output_weights, std::vector<uint8_t> { 205, 7, 95, 104, 252, 143, 226, 73,
305  229, 114, 152, 171, 221, 153, 73, 229,
306  153, 165, 223, 239, 100, 38, 172, 211,
307  226, 133, 239, 207, 116, 230, 170, 100,
308  241, 95, 171, 124, 63, 115, 32, 127,
309  141, 239, 53, 193, 201, 53, 104, 178,
310  186, 212, 167, 107, 226, 230, 71, 213,
311  148, 217, 19, 248, 233, 195, 183, 156 });
312 
313  fill_tensor(recurrent_to_input_weights, std::vector<uint8_t> { 147, 112, 140, 103, 3, 255, 17, 49,
314  84, 112, 144, 213, 138, 142, 112, 66,
315  117, 30, 101, 35, 25, 132, 211, 229,
316  183, 208, 102, 16, 38, 85, 101, 152,
317  226, 83, 132, 22, 161, 110, 157, 129,
318  184, 63, 168, 42, 220, 126, 209, 157,
319  5, 88, 243, 83, 249, 19, 226, 209,
320  173, 96, 185, 77, 146, 227, 238, 136 });
321 
322 
323  fill_tensor(recurrent_to_forget_weights, std::vector<uint8_t> { 52, 132, 92, 200, 213, 32, 213, 37,
324  116, 142, 116, 180, 4, 172, 158, 143,
325  110, 40, 99, 28, 221, 153, 133, 2,
326  247, 144, 198, 100, 20, 15, 221, 196,
327  159, 178, 188, 151, 171, 15, 25, 217,
328  178, 109, 110, 118, 128, 39, 232, 234,
329  184, 214, 177, 13, 56, 6, 28, 252,
330  89, 187, 242, 59, 146, 111, 132, 129});
331 
332  fill_tensor(recurrent_to_cell_weights, std::vector<uint8_t> { 70, 44, 137, 29, 36, 127, 1, 241,
333  26, 241, 142, 114, 67, 181, 49, 57,
334  131, 152, 175, 77, 23, 63, 37, 124,
335  150, 113, 95, 103, 110, 201, 69, 97,
336  196, 242, 62, 214, 66, 19, 45, 135,
337  22, 168, 149, 104, 77, 101, 36, 68,
338  170, 116, 222, 100, 109, 1, 154, 18,
339  133, 215, 105, 93, 31, 57, 231, 112 });
340 
341 
342  fill_tensor(recurrent_to_output_weights, std::vector<uint8_t> { 45 , 181 , 220 , 219 , 49 , 63 , 49 , 129,
343  7 , 166 , 104 , 114 , 83 , 40 , 1 , 195,
344  245 , 142 , 82 , 232 , 104 , 245 , 82 , 196,
345  111 , 56 , 156 , 9 , 141 , 240 , 180 , 148,
346  247 , 198 , 234 , 137 , 13 , 210 , 161 , 192,
347  196 , 59 , 233 , 184 , 142 , 187 , 140 , 166,
348  2 , 95 , 152 , 46 , 71 , 46 , 113 , 32,
349  175 , 229 , 86 , 87 , 62 , 93 , 74 , 130});
350 
351  fill_tensor(input_gate_bias, std::vector<int> { -40040, -106916, -92315, -79123, 45160, -17954, 50962, -63758 });
352  fill_tensor(forget_gate_bias, std::vector<int> { -128514, 8463, -57831, 116977, 106547, -28132, -124557, 44941 });
353  fill_tensor(cell_gate_bias, std::vector<int> { 88388 , 123601, -116148, -13022, 21619, 48926, 57523, 39332 });
354  fill_tensor(output_gate_bias, std::vector<int> { 59485 , -33070, 21386, -100633, -115959, 125768, -56407, 24897 });
355 
356  SimpleTensor<uint8_t> expected_output(output_shape, DataType::QASYMM8, 1, qasymm);
357 
358  // Initialize state
359  fill_tensor(output_state, std::vector<uint8_t> { 128, 128, 128, 128, 128, 128, 128, 128,
360  128, 128, 128, 128, 128, 128, 128, 128,
361  128, 128, 128, 128, 128, 128, 128, 128,
362  128, 128, 128, 128, 128, 128, 128, 128,
363  128, 128, 128, 128, 128, 128, 128, 128,
364  128, 128, 128, 128, 128, 128, 128, 128,
365  128, 128, 128, 128, 128, 128, 128, 128,
366  128, 128, 128, 128, 128, 128, 128, 128,
367  128, 128, 128, 128, 128, 128, 128, 128,
368  128, 128, 128, 128, 128, 128, 128, 128,
369  128, 128, 128, 128, 128, 128, 128, 128,
370  128, 128, 128, 128, 128, 128, 128, 128,
371  128, 128, 128, 128, 128, 128, 128, 128,
372  128, 128, 128, 128, 128, 128, 128, 128,
373  128, 128, 128, 128, 128, 128, 128, 128,
374  128, 128, 128, 128, 128, 128, 128, 128 });
375 
376  fill_tensor(cell_state, std::vector<int16_t> { 0, 0, 0, 0, 0, 0, 0, 0,
377  0, 0, 0, 0, 0, 0, 0, 0,
378  0, 0, 0, 0, 0, 0, 0, 0,
379  0, 0, 0, 0, 0, 0, 0, 0,
380  0, 0, 0, 0, 0, 0, 0, 0,
381  0, 0, 0, 0, 0, 0, 0, 0,
382  0, 0, 0, 0, 0, 0, 0, 0,
383  0, 0, 0, 0, 0, 0, 0, 0,
384  0, 0, 0, 0, 0, 0, 0, 0,
385  0, 0, 0, 0, 0, 0, 0, 0,
386  0, 0, 0, 0, 0, 0, 0, 0,
387  0, 0, 0, 0, 0, 0, 0, 0,
388  0, 0, 0, 0, 0, 0, 0, 0,
389  0, 0, 0, 0, 0, 0, 0, 0,
390  0, 0, 0, 0, 0, 0, 0, 0,
391  0, 0, 0, 0, 0, 0, 0, 0});
392 
393  // First input
394  fill_tensor(input, std::vector<uint8_t> { 247, 203, 159, 131, 182, 114, 207, 195,
395  48 , 61 , 154, 16, 80, 101, 116, 255,
396  50 , 115 , 45, 186, 75, 212, 98, 48,
397  88 , 146 , 24, 143, 218, 174, 203, 200,
398  239 , 16 , 66, 136, 234, 54, 94, 51,
399  101 , 128 , 220, 213, 164, 82, 137, 255,
400  70 , 165 , 234, 220, 66, 35, 183, 206,
401  39 , 57 , 180, 202, 23, 172, 224, 109,
402  102 , 215 , 186, 82, 215, 147, 85, 187,
403  96 , 249 , 59, 116, 150, 44, 167, 128,
404  34 , 217 , 148, 193, 243, 38, 250, 208,
405  112 , 130 , 208, 29, 16, 122, 20, 92,
406  24 , 72 , 104, 29, 150, 233, 151, 19,
407  158 , 192 , 254, 70, 73, 142, 106, 152,
408  3 , 61 , 24, 135, 212, 9, 80, 234,
409  147 , 246 , 83, 249, 49, 14, 68, 50});
410 
411  fill_tensor(expected_output, std::vector<uint8_t> {131, 128, 128, 128, 128, 180, 129, 133,
412  136, 128, 126, 128, 128, 173, 135, 130,
413  160, 128, 128, 128, 128, 138, 132, 129,
414  131, 128, 127, 128, 128, 169, 129, 131,
415  133, 128, 128, 128, 128, 182, 130, 129,
416  131, 128, 128, 128, 128, 163, 129, 130,
417  131, 128, 128, 128, 128, 149, 132, 129,
418  143, 128, 127, 128, 128, 150, 134, 131,
419  134, 128, 128, 128, 128, 167, 130, 130,
420  131, 128, 128, 128, 128, 152, 132, 129,
421  128, 128, 128, 128, 128, 169, 130, 130,
422  173, 128, 128, 128, 128, 148, 139, 130,
423  152, 128, 128, 128, 128, 168, 139, 132,
424  147, 128, 128, 128, 128, 161, 131, 132,
425  130, 128, 128, 128, 128, 159, 134, 128,
426  140, 128, 128, 128, 128, 133, 132, 128 });
427 
428  lstmq.run();
429  validate(CLAccessor(output_state), expected_output);
430 
431  // Second input
432  fill_tensor(expected_output, std::vector<uint8_t> { 130, 128, 128, 128, 128, 205, 129, 137,
433  135, 128, 127, 128, 128, 190, 137, 132,
434  160, 128, 128, 128, 128, 142, 133, 131,
435  130, 128, 128, 128, 128, 185, 129, 133,
436  132, 128, 128, 128, 128, 198, 131, 130,
437  130, 128, 128, 128, 128, 178, 130, 131,
438  131, 128, 128, 128, 128, 158, 132, 131,
439  142, 128, 127, 128, 128, 158, 135, 134,
440  133, 128, 128, 128, 128, 178, 131, 132,
441  131, 128, 128, 128, 128, 160, 132, 130,
442  128, 128, 128, 128, 128, 190, 131, 131,
443  170, 128, 128, 128, 128, 157, 142, 131,
444  149, 128, 128, 128, 128, 178, 142, 135,
445  145, 128, 128, 128, 129, 173, 132, 135,
446  129, 128, 128, 128, 128, 171, 134, 129,
447  140, 128, 128, 128, 128, 135, 132, 129});
448  lstmq.run();
449  validate(CLAccessor(output_state), expected_output);
450 }
451 TEST_SUITE_END() // MultSmallerEq1
452 
453 TEST_SUITE(MultGreater1)
455 {
456  //Input sequence length is 1
457  const int batch_size = 2;
458  const int input_size = 2;
459  const int output_size = 4;
460 
461  QuantizationInfo qasymm(1.f / 128.f, 128);
462  QuantizationInfo qweights(1.f / 16.f, 16);
463  QuantizationInfo qsymm_3(8.f / 32768.f, 0);
464  QuantizationInfo qsymm_4(16.f / 32768.f, 0);
465 
470  TensorShape bias_shape{ output_size };
471 
480  auto input_gate_bias = create_tensor<CLTensor>(bias_shape, DataType::S32);
481  auto forget_gate_bias = create_tensor<CLTensor>(bias_shape, DataType::S32);
482  auto cell_gate_bias = create_tensor<CLTensor>(bias_shape, DataType::S32);
483  auto output_gate_bias = create_tensor<CLTensor>(bias_shape, DataType::S32);
484 
485  // LSTM input
486  auto input = create_tensor<CLTensor>(input_shape, DataType::QASYMM8, 1, qasymm);
487 
488  // LSTM output state
489  auto output_state = create_tensor<CLTensor>(output_shape, DataType::QASYMM8, 1, qasymm);
490 
491  // LSTM cell state
492  auto cell_state = create_tensor<CLTensor>(output_shape, DataType::QSYMM16, 1, qsymm_4);
493 
495 
499 
500  input.allocator()->allocate();
501  input_to_input_weights.allocator()->allocate();
502  input_to_forget_weights.allocator()->allocate();
503  input_to_cell_weights.allocator()->allocate();
504  input_to_output_weights.allocator()->allocate();
505  recurrent_to_input_weights.allocator()->allocate();
506  recurrent_to_forget_weights.allocator()->allocate();
507  recurrent_to_cell_weights.allocator()->allocate();
508  recurrent_to_output_weights.allocator()->allocate();
509  input_gate_bias.allocator()->allocate();
510  forget_gate_bias.allocator()->allocate();
511  cell_gate_bias.allocator()->allocate();
512  output_gate_bias.allocator()->allocate();
513  cell_state.allocator()->allocate();
514  output_state.allocator()->allocate();
515 
516  // Fill weights and biases
517  fill_tensor(input_to_input_weights, std::vector<uint8_t>{ 122, 130,
518  124, 134,
519  120, 122,
520  134, 134 });
521 
522  fill_tensor(input_to_forget_weights, std::vector<uint8_t> { 204, 193,
523  148, 59,
524  113, 17,
525  66, 197 });
526 
527  fill_tensor(input_to_cell_weights, std::vector<uint8_t> { 172, 101,
528  184, 209,
529  165, 82,
530  108, 209 });
531 
532  fill_tensor(input_to_output_weights, std::vector<uint8_t> { 203, 244,
533  219, 114,
534  130, 16,
535  163, 222 });
536 
537  fill_tensor(recurrent_to_input_weights, std::vector<uint8_t> { 162, 168, 7, 95,
538  91, 155, 108, 216,
539  255, 100, 48, 188,
540  58, 37, 186, 147 });
541 
542  fill_tensor(recurrent_to_forget_weights, std::vector<uint8_t> { 46, 58, 47, 170,
543  246, 96, 12, 99,
544  68, 23, 186, 161,
545  237, 164, 89, 6 });
546 
547  fill_tensor(recurrent_to_cell_weights, std::vector<uint8_t> { 234, 99, 71, 206,
548  205, 159, 64, 253,
549  191, 148, 116, 8,
550  209, 136, 59, 138 });
551 
552  fill_tensor(recurrent_to_output_weights, std::vector<uint8_t> { 23, 241, 137, 36,
553  206, 5, 227, 56,
554  254, 176, 231, 47,
555  18, 201, 161, 11 });
556 
557  fill_tensor(input_gate_bias, std::vector<int> {-103038, 30525, 115255, -38154 });
558  fill_tensor(forget_gate_bias, std::vector<int> { -23428, 126970, 116806, 46307 });
559  fill_tensor(cell_gate_bias, std::vector<int> { 128006, 69949, -42808, 42568 });
560  fill_tensor(output_gate_bias, std::vector<int> { -67066, -53607, 47233, 7300 });
561 
562  SimpleTensor<uint8_t> expected_output(output_shape, DataType::QASYMM8, 1, qasymm);
563 
564  // Initialize state
565  fill_tensor(output_state, std::vector<uint8_t> { 128, 128, 128, 128,
566  128, 128, 128, 128 });
567  fill_tensor(cell_state, std::vector<int16_t> { 0, 0, 0, 0,
568  0, 0, 0, 0 });
569 
570  // First input
571  fill_tensor(input, std::vector<uint8_t> { 106, 193,
572  155, 150 });
573 
574  fill_tensor(expected_output, std::vector<uint8_t> { 128, 128, 31, 128,
575  128, 128, 31, 128 });
576 
577  lstmq.run();
578  validate(CLAccessor(output_state), expected_output);
579 
580  // Second input
581  fill_tensor(expected_output, std::vector<uint8_t> { 128, 128, 5, 128,
582  128, 128, 5, 128 });
583  lstmq.run();
584  validate(CLAccessor(output_state), expected_output);
585 
586  // Third input
587  fill_tensor(expected_output, std::vector<uint8_t> { 128, 128, 1, 128,
588  128, 128, 1, 128, });
589  lstmq.run();
590  validate(CLAccessor(output_state), expected_output);
591 }
592 TEST_SUITE_END() // MultGreater1
593 TEST_SUITE_END() // IntegrationTestCase
594 // clang-format on
595 // *INDENT-ON*
596 
597 TEST_SUITE_END() // LSTMLayerQuantized
598 TEST_SUITE_END() // CL
599 } // namespace validation
600 } // namespace test
601 } // namespace arm_compute
Shape of a tensor.
Definition: TensorShape.h:39
quantized, symmetric fixed-point 16-bit number
QuantizationInfo qweights(1.f/16.f, 16)
SimpleTensor< uint8_t > expected_output(output_shape, DataType::QASYMM8, 1, qasymm)
Copyright (c) 2017-2021 Arm Limited.
int coord2index(const TensorShape &shape, const Coordinates &coord)
Linearise the given coordinate.
Definition: Utils.h:387
void configure(const ICLTensor *input, const ICLTensor *input_to_input_weights, const ICLTensor *input_to_forget_weights, const ICLTensor *input_to_cell_weights, const ICLTensor *input_to_output_weights, const ICLTensor *recurrent_to_input_weights, const ICLTensor *recurrent_to_forget_weights, const ICLTensor *recurrent_to_cell_weights, const ICLTensor *recurrent_to_output_weights, const ICLTensor *input_gate_bias, const ICLTensor *forget_gate_bias, const ICLTensor *cell_bias, const ICLTensor *output_gate_bias, ICLTensor *cell_state_in, const ICLTensor *output_state_in, ICLTensor *cell_state_out, ICLTensor *output_state_out)
Initialize function's tensors.
QuantizationInfo qsymm_3(8.f/32768.f, 0)
1 channel, 1 S32 per channel
Basic function to run CLLSTMLayerQuantized.
Quantization information.
TensorShape input_shape
Validate test suite is to test ARM_COMPUTE_RETURN_ON_* macros we use to check the validity of given a...
DatasetMode
Possible dataset modes.
Definition: DatasetModes.h:40
fill_tensor(input_to_input_weights, std::vector< uint8_t >{ 122, 130, 124, 134, 120, 122, 134, 134 })
TEST_SUITE_END() FIXTURE_DATA_TEST_CASE(RunSmall
[CLActivationLayer Test snippet]
quantized, asymmetric fixed-point 8-bit number unsigned
Accessor implementation for CLTensor objects.
Definition: CLAccessor.h:36
TEST_SUITE(U8_to_S8) FIXTURE_DATA_TEST_CASE(RunSmall
validate(CLAccessor(output_state), expected_output)
void run() override
Run the kernels contained in the function.
QuantizationInfo qasymm(1.f/128.f, 128)
void execute_window_loop(const Window &w, L &&lambda_function, Ts &&... iterators)
Iterate through the passed window, automatically adjusting the iterators and calling the lambda_funct...
Definition: Helpers.inl:77
TEST_CASE(FusedActivation, framework::DatasetMode::ALL)
Validate fused activation expecting the following behaviours:
QuantizationInfo qsymm_4(16.f/32768.f, 0)