Compute Library 21.02 - GEMMLowp.cpp
/*
 * Copyright (c) 2017-2021 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.h"
#include "arm_compute/runtime/NEON/functions/NEGEMMLowpOutputStage.h"
#include "arm_compute/runtime/Tensor.h"
#include "arm_compute/runtime/TensorAllocator.h"
#include "tests/NEON/Accessor.h"
#include "tests/NEON/Helper.h"
#include "tests/PaddingCalculator.h"
#include "tests/datasets/GEMMLowpFusedOffsetOutputDataset.h"
#include "tests/datasets/LargeGEMMLowpDataset.h"
#include "tests/datasets/ShapeDatasets.h"
#include "tests/datasets/SmallGEMMLowpDataset.h"
#include "tests/framework/Asserts.h"
#include "tests/framework/Macros.h"
#include "tests/framework/datasets/Datasets.h"
#include "tests/validation/Validation.h"
#include "tests/validation/fixtures/GEMMLowpAssemblyFixture.h"
#include "tests/validation/fixtures/GEMMLowpFixture.h"

namespace arm_compute
{
namespace test
{
namespace validation
{
TEST_SUITE(NEON)
TEST_SUITE(GEMMLowp)
TEST_SUITE(MatrixMultiplyCore)
using NEGEMMLowpMatrixMultiplyCoreFixture = GEMMLowpMatrixMultiplyCoreValidationFixture<Tensor, Accessor, NEGEMMLowpMatrixMultiplyCore>;
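// Each validation fixture below runs the function under test on Neon (exposed
// as _target) and a plain C++ reference implementation (_reference) over the
// same dataset, so the FIXTURE_DATA_TEST_CASE bodies only have to compare the two.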

DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, framework::dataset::concat(datasets::SmallGEMMLowpDataset(), datasets::LargeGEMMLowpDataset()),
               shape_a, shape_b, shape_c, a_offset, b_offset)
{
    // Create tensors
    Tensor a = create_tensor<Tensor>(shape_a, DataType::QASYMM8);
    Tensor b = create_tensor<Tensor>(shape_b, DataType::QASYMM8);
    Tensor c = create_tensor<Tensor>(shape_c, DataType::S32);

    a.info()->set_quantization_info(QuantizationInfo(1.0f / 255, a_offset));
    b.info()->set_quantization_info(QuantizationInfo(1.0f / 255, b_offset));

    ARM_COMPUTE_EXPECT(a.info()->is_resizable(), framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT(b.info()->is_resizable(), framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT(c.info()->is_resizable(), framework::LogLevel::ERRORS);

    // Create and configure function
    NEGEMMLowpMatrixMultiplyCore gemmlowp_mm;
    gemmlowp_mm.configure(&a, &b, nullptr, &c);

    // Validate padding is zero
    validate(a.info()->padding(), PaddingSize());
    validate(b.info()->padding(), PaddingSize());
    validate(c.info()->padding(), PaddingSize());
}
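// A minimal sketch (not the library implementation) of the accumulation these
// tests exercise: quantized GEMM widens to int32 and folds in the quantization
// offsets, assuming the usual gemmlowp convention of adding each offset to the
// raw 8-bit value. The hypothetical helper below illustrates one output element:
inline int32_t gemmlowp_dot_sketch(const uint8_t *a_row, const uint8_t *b_col, size_t k, int32_t a_offset, int32_t b_offset)
{
    int32_t acc = 0;
    for(size_t i = 0; i < k; ++i)
    {
        acc += (static_cast<int32_t>(a_row[i]) + a_offset) * (static_cast<int32_t>(b_col[i]) + b_offset);
    }
    return acc; // S32 accumulator; an output stage requantizes it later
}
// E.g. a = 3, a_offset = 1, b = 4, b_offset = 2 contributes (3 + 1) * (4 + 2) = 24.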

// *INDENT-OFF*
// clang-format off
DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
    framework::dataset::make("InputAInfo", { TensorInfo(TensorShape(21U, 13U), 1, DataType::QASYMM8, QuantizationInfo(1.f/255, 10)), // Input not a multiple of 4
                                             TensorInfo(TensorShape(21U, 13U), 1, DataType::S32),                                    // Mismatching data type
                                             TensorInfo(TensorShape(20U, 13U), 1, DataType::QASYMM8, QuantizationInfo(1.f/255, 10)), // Invalid dimensions
                                             TensorInfo(TensorShape(21U, 13U), 1, DataType::QASYMM8, QuantizationInfo(1.f/255, 10)), // Invalid dimensions
                                             TensorInfo(TensorShape(16U, 32U), 1, DataType::QASYMM8, QuantizationInfo(1.f/255, 10)),
                                           }),
    framework::dataset::make("InputBInfo", { TensorInfo(TensorShape(33U, 21U), 1, DataType::QASYMM8, QuantizationInfo(1.f/256, 10)),
                                             TensorInfo(TensorShape(33U, 21U), 1, DataType::QASYMM8, QuantizationInfo(1.f/256, 10)),
                                             TensorInfo(TensorShape(33U, 21U), 1, DataType::QASYMM8, QuantizationInfo(1.f/256, 10)),
                                             TensorInfo(TensorShape(33U, 21U), 1, DataType::QASYMM8, QuantizationInfo(1.f/256, 10)),
                                             TensorInfo(TensorShape(64U, 16U), 1, DataType::QASYMM8, QuantizationInfo(1.f/256, 10)),
                                           })),
    framework::dataset::make("OutputInfo", { TensorInfo(TensorShape(33U, 13U), 1, DataType::S32),
                                             TensorInfo(TensorShape(33U, 13U), 1, DataType::S32),
                                             TensorInfo(TensorShape(33U, 13U), 1, DataType::S32),
                                             TensorInfo(TensorShape(8U, 11U), 1, DataType::S32),
                                             TensorInfo(TensorShape(64U, 32U), 1, DataType::S32),
                                           })),
    framework::dataset::make("Expected", { true, false, false, false, true })),
    a_info, b_info, output_info, expected)
{
    // Lock tensors
    Status status = NEGEMMLowpMatrixMultiplyCore::validate(&a_info.clone()->set_is_resizable(false),
                                                           &b_info.clone()->set_is_resizable(false),
                                                           nullptr,
                                                           &output_info.clone()->set_is_resizable(false));
    ARM_COMPUTE_EXPECT(bool(status) == expected, framework::LogLevel::ERRORS);
}
// clang-format on
// *INDENT-ON*

FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMLowpMatrixMultiplyCoreFixture, framework::DatasetMode::ALL, datasets::SmallGEMMLowpDataset())
{
    // Validate output
    validate(Accessor(_target), _reference);
}

FIXTURE_DATA_TEST_CASE(RunLarge, NEGEMMLowpMatrixMultiplyCoreFixture, framework::DatasetMode::NIGHTLY, datasets::LargeGEMMLowpDataset())
{
    // Validate output
    validate(Accessor(_target), _reference);
}

using NEGEMMLowpMatrixMultiplyCoreFusedOffsetOutputFixture = GEMMLowpMatrixMultiplyCoreFusedOffsetOutputValidationFixture<Tensor, Accessor, NEGEMMLowpMatrixMultiplyCore>;
TEST_SUITE(FusedOffsetOutput)
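// Here the requantization is fused into the GEMM: the function takes QASYMM8
// inputs and produces QASYMM8 output directly instead of raw S32 accumulators
// (the standalone requantization paths are covered by the OutputStage suites below).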
FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMLowpMatrixMultiplyCoreFusedOffsetOutputFixture, framework::DatasetMode::ALL, combine(datasets::SmallGEMMLowpFusedOffsetOutputUint8Dataset(),
                       framework::dataset::make("DataType", { DataType::QASYMM8 })))
{
    // Validate output
    validate(Accessor(_target), _reference);
}

FIXTURE_DATA_TEST_CASE(RunLarge, NEGEMMLowpMatrixMultiplyCoreFusedOffsetOutputFixture, framework::DatasetMode::NIGHTLY, combine(datasets::LargeGEMMLowpFusedOffsetOutputUint8Dataset(),
                       framework::dataset::make("DataType", { DataType::QASYMM8 })))
{
    // Validate output
    validate(Accessor(_target), _reference);
}
TEST_SUITE_END() // FusedOffsetOutput
TEST_SUITE_END() // MatrixMultiplyCore

TEST_SUITE(OutputStage)

TEST_SUITE(QuantizeDownInt32Scale)

TEST_SUITE(QASYMM8)

const auto quantize_down_int32_to_uint8_scale_cases = framework::dataset::make("result_offset", -2, 1) * framework::dataset::make("result_mult_int", 1, 2)
                                                      * framework::dataset::make("result_shift", 2, 3)
                                                      * framework::dataset::make("min", 0) * framework::dataset::make("max", 255) * framework::dataset::make("addBias", { false, true });

const auto quantize_down_int32_to_uint8_scale_relu_cases = framework::dataset::make("result_offset", -2, 1) * framework::dataset::make("result_mult_int", 1, 2)
                                                           * framework::dataset::make("result_shift", 2, 3) * framework::dataset::make("min", 0, 2) * framework::dataset::make("max", 171, 174)
                                                           * framework::dataset::make("addBias", { false, true });

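// Sketch of the per-element arithmetic swept by the datasets above, assuming the
// documented QUANTIZE_DOWN behaviour (offset and optional bias added, integer
// multiply, arithmetic shift right, clamp); an illustration, not the kernel:
inline uint8_t quantize_down_scale_sketch(int32_t in, int32_t bias, int32_t result_offset, int32_t result_mult_int, int32_t result_shift, int32_t min, int32_t max)
{
    int32_t acc = (in + bias + result_offset) * result_mult_int;
    acc         = acc >> result_shift;
    acc         = acc < min ? min : (acc > max ? max : acc);                // bounded-ReLu clamp
    return static_cast<uint8_t>(acc < 0 ? 0 : (acc > 255 ? 255 : acc));    // saturate to QASYMM8
}
// E.g. in = 100, bias = 0, result_offset = -2, result_mult_int = 1, result_shift = 2
// gives (100 - 2) * 1 >> 2 = 24.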
using NEGEMMLowpQuantizeDownInt32ScaleFixture = GEMMLowpQuantizeDownInt32ToUint8ScaleValidationFixture<Tensor, Accessor, NEGEMMLowpOutputStage>;

// *INDENT-OFF*
// clang-format off
DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(
    framework::dataset::make("InputAInfo", { TensorInfo(TensorShape(21U, 13U), 1, DataType::S32), // Input not a multiple of 16
                                             TensorInfo(TensorShape(20U, 13U), 1, DataType::S32), // Wrong output data type
                                           }),
    framework::dataset::make("InputBInfo", { TensorInfo(TensorShape(21U), 1, DataType::S32),
                                             TensorInfo(TensorShape(20U), 1, DataType::S32),
                                           })),
    framework::dataset::make("OutputInfo", { TensorInfo(TensorShape(21U, 13U), 1, DataType::QASYMM8),
                                             TensorInfo(TensorShape(20U, 13U), 1, DataType::S32),
                                           })),
    framework::dataset::make("Min", { 0,
                                      13,
                                    })),
    framework::dataset::make("Max", { 205,
                                      180,
                                    })),
    framework::dataset::make("Expected", { true, false })),
    a_info, b_info, output_info, min, max, expected)
{
    GEMMLowpOutputStageInfo output_stage = GEMMLowpOutputStageInfo();
    output_stage.type                    = GEMMLowpOutputStageType::QUANTIZE_DOWN;
    output_stage.gemmlowp_min_bound      = min;
    output_stage.gemmlowp_max_bound      = max;
    output_stage.output_data_type        = DataType::QASYMM8;

    // Lock tensors
    Status status = NEGEMMLowpOutputStage::validate(&a_info.clone()->set_is_resizable(false),
                                                    &b_info.clone()->set_is_resizable(false),
                                                    &output_info.clone()->set_is_resizable(false),
                                                    output_stage);
    ARM_COMPUTE_EXPECT(bool(status) == expected, framework::LogLevel::ERRORS);
}
// clang-format on
// *INDENT-ON*

TEST_CASE(NoPaddingAdded, framework::DatasetMode::ALL)
{
    Tensor input1 = create_tensor<Tensor>(TensorShape(21U, 13U), DataType::S32);
    Tensor input2 = create_tensor<Tensor>(TensorShape(21U, 1U), DataType::S32);
    Tensor output = create_tensor<Tensor>(TensorShape(21U, 13U), DataType::QASYMM8);

    GEMMLowpOutputStageInfo output_stage = GEMMLowpOutputStageInfo();
    output_stage.type                    = GEMMLowpOutputStageType::QUANTIZE_DOWN;
    output_stage.gemmlowp_min_bound      = 0;
    output_stage.gemmlowp_max_bound      = 205;
    output_stage.output_data_type        = DataType::QASYMM8;

    NEGEMMLowpOutputStage f;
    f.configure(&input1, &input2, &output, output_stage);

    // Validate padding is zero
    validate(input1.info()->padding(), PaddingSize());
    validate(input2.info()->padding(), PaddingSize());
    validate(output.info()->padding(), PaddingSize());
}
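// Usage sketch (not part of the test): after configure() the tensors above still
// have no backing memory; a minimal flow to actually execute the stage would be:
//
//   input1.allocator()->allocate();
//   input2.allocator()->allocate();
//   output.allocator()->allocate();
//   f.run();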

FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMLowpQuantizeDownInt32ScaleFixture, framework::DatasetMode::ALL, combine(datasets::SmallShapes(), quantize_down_int32_to_uint8_scale_cases))
{
    // Validate output
    validate(Accessor(_target), _reference);
}

TEST_SUITE(BoundedReLu)
FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMLowpQuantizeDownInt32ScaleFixture, framework::DatasetMode::ALL, combine(datasets::SmallShapes(), quantize_down_int32_to_uint8_scale_relu_cases))
{
    // Validate output
    validate(Accessor(_target), _reference);
}

TEST_SUITE_END() // BoundedReLu

TEST_SUITE_END() // QASYMM8

TEST_SUITE(QASYMM8_SIGNED)

const auto quantize_down_int32_to_int8_scale_cases = framework::dataset::make("result_offset", -2, 1) * framework::dataset::make("result_mult_int", 1, 2)
                                                     * framework::dataset::make("result_shift", 2, 3)
                                                     * framework::dataset::make("min", 0) * framework::dataset::make("max", 0) * framework::dataset::make("addBias", { false, true });

const auto quantize_down_int32_to_int8_scale_relu_cases = framework::dataset::make("result_offset", -2, 1) * framework::dataset::make("result_mult_int", 1, 2)
                                                          * framework::dataset::make("result_shift", 2, 3) * framework::dataset::make("min", -100, -98) * framework::dataset::make("max", 71, 74)
                                                          * framework::dataset::make("addBias", { false, true });
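// Note: min == max == 0 in the first dataset leaves the bounds at their defaults,
// i.e. these cases exercise the unclamped requantization path (an assumption from
// GEMMLowpOutputStageInfo's default bounds); the BoundedReLu cases sweep real bounds.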

using NEGEMMLowpQuantizeDownInt32ScaleFixture = GEMMLowpQuantizeDownInt32ToInt8ScaleValidationFixture<Tensor, Accessor, NEGEMMLowpOutputStage>;

// *INDENT-OFF*
// clang-format off
DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(
    framework::dataset::make("InputAInfo", { TensorInfo(TensorShape(21U, 13U), 1, DataType::S32), // Input not a multiple of 16
                                             TensorInfo(TensorShape(21U, 13U), 1, DataType::S32), // Invalid min and max
                                             TensorInfo(TensorShape(20U, 13U), 1, DataType::S32), // Wrong output data type
                                           }),
    framework::dataset::make("InputBInfo", { TensorInfo(TensorShape(21U), 1, DataType::S32),
                                             TensorInfo(TensorShape(21U), 1, DataType::S32),
                                             TensorInfo(TensorShape(20U), 1, DataType::S32),
                                           })),
    framework::dataset::make("OutputInfo", { TensorInfo(TensorShape(21U, 13U), 1, DataType::QASYMM8_SIGNED),
                                             TensorInfo(TensorShape(21U, 13U), 1, DataType::QASYMM8_SIGNED),
                                             TensorInfo(TensorShape(20U, 13U), 1, DataType::S32),
                                           })),
    framework::dataset::make("Min", { -10,
                                      -200,
                                      -113,
                                    })),
    framework::dataset::make("Max", { 105,
                                      300,
                                      -18,
                                    })),
    framework::dataset::make("Expected", { true, false, false })),
    a_info, b_info, output_info, min, max, expected)
{
    GEMMLowpOutputStageInfo output_stage = GEMMLowpOutputStageInfo();
    output_stage.type                    = GEMMLowpOutputStageType::QUANTIZE_DOWN;
    output_stage.gemmlowp_min_bound      = min;
    output_stage.gemmlowp_max_bound      = max;
    output_stage.output_data_type        = DataType::QASYMM8_SIGNED;

    // Lock tensors
    Status status = NEGEMMLowpOutputStage::validate(&a_info.clone()->set_is_resizable(false),
                                                    &b_info.clone()->set_is_resizable(false),
                                                    &output_info.clone()->set_is_resizable(false),
                                                    output_stage);
    ARM_COMPUTE_EXPECT(bool(status) == expected, framework::LogLevel::ERRORS);
}
// clang-format on
// *INDENT-ON*

FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMLowpQuantizeDownInt32ScaleFixture, framework::DatasetMode::ALL, combine(datasets::SmallShapes(), quantize_down_int32_to_int8_scale_cases))
{
    // Validate output
    validate(Accessor(_target), _reference);
}

TEST_SUITE(BoundedReLu)
FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMLowpQuantizeDownInt32ScaleFixture, framework::DatasetMode::ALL, combine(datasets::SmallShapes(), quantize_down_int32_to_int8_scale_relu_cases))
{
    // Validate output
    validate(Accessor(_target), _reference);
}

TEST_SUITE_END() // BoundedReLu

TEST_SUITE_END() // QASYMM8_SIGNED

TEST_SUITE_END() // QuantizeDownInt32Scale

TEST_SUITE(QuantizeDownInt32ToUint8ScaleByFixedPoint)

const auto quantize_down_int32_to_uint8_scale_by_fixedpoint_cases = framework::dataset::make("result_fixedpoint_multiplier", 254601600, 254601602) * framework::dataset::make("result_shift", 1, 2)
                                                                    * framework::dataset::make("result_offset_after_shift", 2, 3) * framework::dataset::make("min", 0) * framework::dataset::make("max", 255)
                                                                    * framework::dataset::make("addBias", { false, true });

const auto quantize_down_int32_to_uint8_scale_by_fixedpoint_relu_cases = framework::dataset::make("result_fixedpoint_multiplier", 254601600, 254601602) * framework::dataset::make("result_shift", 1, 2)
                                                                         * framework::dataset::make("result_offset_after_shift", 2, 3) * framework::dataset::make("min", 0, 2) * framework::dataset::make("max", 171, 174)
                                                                         * framework::dataset::make("addBias", { false, true });
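// Sketch of the fixed-point requantization the sweeps above parameterize, assuming
// the usual gemmlowp scheme (simplified: the real implementation also saturates
// and rounds negative products away from zero). The multiplier is Q31 fixed point,
// e.g. 254601602 encodes roughly 254601602 / 2^31 ~ 0.119.
inline int32_t requantize_fixedpoint_sketch(int32_t acc, int32_t multiplier, int32_t shift, int32_t offset_after_shift)
{
    // Rounding doubling high multiply: keep the high 32 bits of 2 * acc * multiplier
    const int64_t prod = 2 * static_cast<int64_t>(acc) * static_cast<int64_t>(multiplier);
    int32_t       res  = static_cast<int32_t>((prod + (int64_t(1) << 30)) >> 31);
    if(shift > 0)
    {
        res = (res + (1 << (shift - 1))) >> shift; // rounding shift right
    }
    return res + offset_after_shift; // then clamped to [min, max] and the output range
}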

using NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointFixture =
    GEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointValidationFixture<Tensor, Accessor, NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPoint>;

using NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointFixture =
    GEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointValidationFixture<Tensor, Accessor, NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPoint>;

// *INDENT-OFF*
// clang-format off
DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(
    framework::dataset::make("InputAInfo", { TensorInfo(TensorShape(21U, 13U), 1, DataType::S32), // Input not a multiple of 16
                                             TensorInfo(TensorShape(20U, 13U), 1, DataType::S32), // Wrong output data type
                                           }),
    framework::dataset::make("InputBInfo", { TensorInfo(TensorShape(21U), 1, DataType::S32),
                                             TensorInfo(TensorShape(20U), 1, DataType::S32),
                                           })),
    framework::dataset::make("OutputInfo", { TensorInfo(TensorShape(21U, 13U), 1, DataType::QASYMM8),
                                             TensorInfo(TensorShape(20U, 13U), 1, DataType::S32),
                                           })),
    framework::dataset::make("Min", { 0,
                                      13,
                                    })),
    framework::dataset::make("Max", { 205,
                                      180,
                                    })),
    framework::dataset::make("Expected", { true, false })),
    a_info, b_info, output_info, min, max, expected)
{
    // Lock tensors
    Status status = NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPoint::validate(&a_info.clone()->set_is_resizable(false),
                                                                                  &b_info.clone()->set_is_resizable(false),
                                                                                  &output_info.clone()->set_is_resizable(false),
                                                                                  min,
                                                                                  max);
    ARM_COMPUTE_EXPECT(bool(status) == expected, framework::LogLevel::ERRORS);
}
// clang-format on
// *INDENT-ON*

FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointFixture, framework::DatasetMode::ALL, combine(datasets::SmallShapes(),
                       quantize_down_int32_to_uint8_scale_by_fixedpoint_cases))
{
    // Validate output
    validate(Accessor(_target), _reference);
}

FIXTURE_DATA_TEST_CASE(RunLarge, NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointFixture, framework::DatasetMode::NIGHTLY, combine(datasets::LargeShapes(),
                       quantize_down_int32_to_uint8_scale_by_fixedpoint_cases))
{
    // Validate output
    validate(Accessor(_target), _reference);
}

TEST_SUITE(BoundedReLu)
FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointFixture, framework::DatasetMode::ALL, combine(datasets::SmallShapes(),
                       quantize_down_int32_to_uint8_scale_by_fixedpoint_relu_cases))
{
    // Validate output
    validate(Accessor(_target), _reference);
}

FIXTURE_DATA_TEST_CASE(RunLarge, NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointFixture, framework::DatasetMode::NIGHTLY, combine(datasets::LargeShapes(),
                       quantize_down_int32_to_uint8_scale_by_fixedpoint_relu_cases))
{
    // Validate output
    validate(Accessor(_target), _reference);
}
TEST_SUITE_END() // BoundedReLu

TEST_SUITE_END() // QuantizeDownInt32ToUint8ScaleByFixedPoint

TEST_SUITE(QuantizeDownInt32ToInt8ScaleByFixedPoint)

const auto quantize_down_int32_to_int8_scale_by_fixedpoint_cases = framework::dataset::make("result_fixedpoint_multiplier", 254601600, 254601602) * framework::dataset::make("result_shift", 1, 2)
                                                                   * framework::dataset::make("result_offset_after_shift", 2, 3) * framework::dataset::make("min", -128) * framework::dataset::make("max", 128)
                                                                   * framework::dataset::make("addBias", { false, true });

const auto quantize_down_int32_to_int8_scale_by_fixedpoint_relu_cases = framework::dataset::make("result_fixedpoint_multiplier", 254601600, 254601602) * framework::dataset::make("result_shift", 1, 2)
                                                                        * framework::dataset::make("result_offset_after_shift", 2, 3) * framework::dataset::make("min", -2, 0) * framework::dataset::make("max", 1, 3)
                                                                        * framework::dataset::make("addBias", { false, true });

using NEGEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPointFixture =
    GEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPointValidationFixture<Tensor, Accessor, NEGEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPoint>;

// *INDENT-OFF*
// clang-format off
DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(
    framework::dataset::make("InputAInfo", { TensorInfo(TensorShape(21U, 13U), 1, DataType::F32), // Invalid input data type
                                             TensorInfo(TensorShape(20U, 13U), 1, DataType::S32), // Wrong output data type
                                             TensorInfo(TensorShape(21U, 13U), 1, DataType::S32),
                                           }),
    framework::dataset::make("InputBInfo", { TensorInfo(TensorShape(21U), 1, DataType::S32),
                                             TensorInfo(TensorShape(20U), 1, DataType::S32),
                                             TensorInfo(TensorShape(21U), 1, DataType::S32),
                                           })),
    framework::dataset::make("OutputInfo", { TensorInfo(TensorShape(21U, 13U), 1, DataType::QASYMM8_SIGNED),
                                             TensorInfo(TensorShape(20U, 13U), 1, DataType::S32),
                                             TensorInfo(TensorShape(21U, 13U), 1, DataType::QASYMM8_SIGNED),
                                           })),
    framework::dataset::make("Min", { -110,
                                      -113,
                                      -113,
                                    })),
    framework::dataset::make("Max", { 87,
                                      97,
                                      97,
                                    })),
    framework::dataset::make("Expected", { false, false, true })),
    a_info, b_info, output_info, min, max, expected)
{
    // Lock tensors
    Status status = NEGEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPoint::validate(&a_info.clone()->set_is_resizable(false),
                                                                                 &b_info.clone()->set_is_resizable(false),
                                                                                 &output_info.clone()->set_is_resizable(false),
                                                                                 min,
                                                                                 max);
    ARM_COMPUTE_EXPECT(bool(status) == expected, framework::LogLevel::ERRORS);
}
// clang-format on
// *INDENT-ON*

FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPointFixture, framework::DatasetMode::ALL, combine(datasets::SmallShapes(),
                       quantize_down_int32_to_int8_scale_by_fixedpoint_cases))
{
    // Validate output
    validate(Accessor(_target), _reference);
}

TEST_SUITE(BoundedReLu)
FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPointFixture, framework::DatasetMode::ALL, combine(datasets::SmallShapes(),
                       quantize_down_int32_to_int8_scale_by_fixedpoint_relu_cases))
{
    // Validate output
    validate(Accessor(_target), _reference);
}
TEST_SUITE_END() // BoundedReLu
TEST_SUITE_END() // QuantizeDownInt32ToInt8ScaleByFixedPoint

TEST_SUITE(QuantizeDownInt32ToInt16ScaleByFixedPoint)

const auto quantize_down_int32_to_int16_scale_by_fixedpoint_cases = framework::dataset::make("result_fixedpoint_multiplier", 254601600, 254601602) * framework::dataset::make("result_shift", 1, 2)
                                                                    * framework::dataset::make("min", -32768) * framework::dataset::make("max", 32767) * framework::dataset::make("addBias", { false, true });

const auto quantize_down_int32_to_int16_scale_by_fixedpoint_relu_cases = framework::dataset::make("result_fixedpoint_multiplier", 254601600, 254601602) * framework::dataset::make("result_shift", 1, 2)
                                                                         * framework::dataset::make("min", -2, 0) * framework::dataset::make("max", 1, 3) * framework::dataset::make("addBias", { false, true });

const auto quantize_down_int32_to_int16_scale_by_fixedpoint_multgreat1_cases = framework::dataset::make("result_fixedpoint_multiplier", 1073741823, 1073741825) * framework::dataset::make("result_shift", -3, -2)
                                                                               * framework::dataset::make("min", -32768) * framework::dataset::make("max", 32767) * framework::dataset::make("addBias", { false, true });

const auto quantize_down_int32_to_int16_scale_by_fixedpoint_multgreat1_relu_cases = framework::dataset::make("result_fixedpoint_multiplier", 254601600, 254601602) * framework::dataset::make("result_shift", -3, -1)
                                                                                    * framework::dataset::make("min", -2, 0) * framework::dataset::make("max", 1, 3) * framework::dataset::make("addBias", { false, true });

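// The "MultGreater1" cases above use a negative result_shift. A sketch of the
// intended semantics (an assumption from the parameter names, not the kernel
// code): the Q31 multiplier encodes a scale in [0.5, 1), and a negative shift is
// a left shift that lifts the effective scale above 1. For example,
// 1073741824 = 2^30 encodes 2^30 / 2^31 = 0.5, and result_shift = -3 multiplies
// by 2^3, for an effective scale of 0.5 * 8 = 4.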
using NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointFixture =
    GEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointValidationFixture<Tensor, Accessor, NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPoint>;

// *INDENT-OFF*
// clang-format off
DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(
    framework::dataset::make("InputAInfo", { TensorInfo(TensorShape(21U, 13U), 1, DataType::S32), // Input not a multiple of 16
                                             TensorInfo(TensorShape(20U, 13U), 1, DataType::S32), // Wrong output data type
                                           }),
    framework::dataset::make("InputBInfo", { TensorInfo(TensorShape(21U), 1, DataType::S32),
                                             TensorInfo(TensorShape(20U), 1, DataType::S32),
                                           })),
    framework::dataset::make("OutputInfo", { TensorInfo(TensorShape(21U, 13U), 1, DataType::QSYMM16),
                                             TensorInfo(TensorShape(20U, 13U), 1, DataType::S32),
                                           })),
    framework::dataset::make("Min", { -205,
                                      -180,
                                    })),
    framework::dataset::make("Max", { 205,
                                      180,
                                    })),
    framework::dataset::make("Expected", { true, false })),
    a_info, b_info, output_info, min, max, expected)
{
    // Lock tensors
    Status status = NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPoint::validate(&a_info.clone()->set_is_resizable(false),
                                                                                  &b_info.clone()->set_is_resizable(false),
                                                                                  &output_info.clone()->set_is_resizable(false),
                                                                                  min,
                                                                                  max);
    ARM_COMPUTE_EXPECT(bool(status) == expected, framework::LogLevel::ERRORS);
}
// clang-format on
// *INDENT-ON*

TEST_SUITE(NoRelu)
TEST_SUITE(MultSmallerEq1)
FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointFixture, framework::DatasetMode::ALL, combine(datasets::SmallShapes(),
                       quantize_down_int32_to_int16_scale_by_fixedpoint_cases))
{
    // Validate output
    validate(Accessor(_target), _reference);
}
TEST_SUITE_END() // MultSmallerEq1
TEST_SUITE(MultGreater1)
FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointFixture, framework::DatasetMode::ALL, combine(datasets::SmallShapes(),
                       quantize_down_int32_to_int16_scale_by_fixedpoint_multgreat1_cases))
{
    // Validate output
    validate(Accessor(_target), _reference);
}
TEST_SUITE_END() // MultGreater1
TEST_SUITE_END() // NoRelu
TEST_SUITE(BoundedReLu)
TEST_SUITE(MultSmallerEq1)
FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointFixture, framework::DatasetMode::ALL, combine(datasets::SmallShapes(),
                       quantize_down_int32_to_int16_scale_by_fixedpoint_relu_cases))
{
    // Validate output
    validate(Accessor(_target), _reference);
}
TEST_SUITE_END() // MultSmallerEq1
TEST_SUITE(MultGreater1)
FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointFixture, framework::DatasetMode::ALL, combine(datasets::SmallShapes(),
                       quantize_down_int32_to_int16_scale_by_fixedpoint_multgreat1_relu_cases))
{
    // Validate output
    validate(Accessor(_target), _reference);
}
TEST_SUITE_END() // MultGreater1
TEST_SUITE_END() // BoundedReLu
TEST_SUITE_END() // QuantizeDownInt32ToInt16ScaleByFixedPoint
TEST_SUITE_END() // OutputStage
TEST_SUITE_END() // GEMMLowp
TEST_SUITE_END() // Neon
} // namespace validation
} // namespace test
} // namespace arm_compute