Compute Library
 22.11
ReductionOperation.cpp
/*
 * Copyright (c) 2017-2021 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/CL/CLTensor.h"
#include "arm_compute/runtime/CL/CLTensorAllocator.h"
#include "arm_compute/runtime/CL/functions/CLReductionOperation.h"
#include "tests/CL/CLAccessor.h"
#include "tests/datasets/ShapeDatasets.h"
#include "tests/framework/Asserts.h"
#include "tests/framework/Macros.h"
#include "tests/framework/datasets/Datasets.h"
#include "tests/validation/Validation.h"
#include "tests/validation/fixtures/ReductionOperationFixture.h"

namespace arm_compute
{
namespace test
{
namespace validation
{
namespace
{
/** Tolerance for float operations */
AbsoluteTolerance<float> tolerance_f32(0.001f);
RelativeTolerance<float> rel_tolerance_f32(0.00001f);
AbsoluteTolerance<float> tolerance_f16(0.5f);
RelativeTolerance<float> rel_tolerance_f16(0.2f);
/** Tolerance for quantized operations */
RelativeTolerance<float> tolerance_qasymm8(1);
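// Note on how these tolerances are used below (general behaviour of the validation framework;
// the exact comparison logic lives in tests/validation/Validation.h): an AbsoluteTolerance bounds
// |target - reference|, while a RelativeTolerance bounds the difference relative to the reference
// magnitude. The large-shape float runs pass both a relative and an absolute tolerance, presumably
// so that values close to zero are not rejected on relative error alone.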

const auto ReductionOperationsSumProdMean = framework::dataset::make("ReductionOperationsSumProdMean",
{
    ReductionOperation::SUM,
    ReductionOperation::PROD,
    ReductionOperation::MEAN_SUM,
});
const auto ReductionOperationsMinMax = framework::dataset::make("ReductionMinMax",
{
    ReductionOperation::MIN,
    ReductionOperation::MAX
});

const auto KeepDimensions = framework::dataset::make("KeepDims", { true, false });
} // namespace
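// In the fixtures below, combine() builds the cartesian product of its argument datasets and
// concat() joins the two operation lists, so each FIXTURE_DATA_TEST_CASE expands into every
// combination of its shape, data type, axis, operation and keep_dims values.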

TEST_SUITE(CL)
TEST_SUITE(Reduction)

// *INDENT-OFF*
// clang-format off
DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(
               framework::dataset::make("InputInfo", { TensorInfo(TensorShape(128U, 64U), 1, DataType::F32),     // Mismatching data type input/output
                                                       TensorInfo(TensorShape(128U, 64U), 3, DataType::F32),     // Number of Input channels != 1
                                                       TensorInfo(TensorShape(128U, 64U), 1, DataType::S16),     // DataType != QASYMM8/F16/F32
                                                       TensorInfo(TensorShape(128U, 64U), 1, DataType::F32),     // Axis >= num_max_dimensions
                                                       TensorInfo(TensorShape(128U, 64U), 1, DataType::QASYMM8), // Axis == 0 and SUM_SQUARE and QASYMM8
                                                       TensorInfo(TensorShape(128U, 64U), 1, DataType::F32)      // Kept dimension when keep_dims = false
                                                     }),
               })),
               framework::dataset::make("Axis", { 0U, 0U, 0U, static_cast<unsigned int>(TensorShape::num_max_dimensions), 1U, 0U, 0U })),
               framework::dataset::make("KeepDims", { true, true, true, true, true, true, false })),
               framework::dataset::make("Expected", { false, false, false, false, false, true, false })),
               input_info, output_info, axis, keep_dims, expected)
{
    bool is_valid = bool(CLReductionOperation::validate(&input_info.clone()->set_is_resizable(false),
                                                        &output_info.clone()->set_is_resizable(true),
                                                        axis,
                                                        ReductionOperation::SUM_SQUARE,
                                                        keep_dims));
    ARM_COMPUTE_EXPECT(is_valid == expected, framework::LogLevel::ERRORS);
}
// clang-format on
// *INDENT-ON*
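// A minimal pre-check sketch (not part of the test suite; shapes, axis and operation here are
// illustrative only): the same static validate() entry point exercised above can be queried
// before any tensor memory is allocated.
//
//     TensorInfo src_info(TensorShape(128U, 64U), 1, DataType::F32);
//     TensorInfo dst_info(TensorShape(1U, 64U), 1, DataType::F32); // axis 0 reduced, keep_dims = true
//     Status     status = CLReductionOperation::validate(&src_info, &dst_info, 0U, ReductionOperation::SUM, true);
//     // status reports OK only when the configuration is supported.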

template <typename T>
using CLReductionOperationFixture = ReductionOperationFixture<CLTensor, CLAccessor, CLReductionOperation, T>;

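// Hedged sketch of the runtime flow the fixture drives for each dataset entry (the exact
// sequence lives in ReductionOperationFixture.h; the shape, axis and operation below are
// assumptions for illustration):
//
//     CLTensor src, dst;
//     src.allocator()->init(TensorInfo(TensorShape(128U, 64U, 2U, 2U), 1, DataType::F32));
//     CLReductionOperation reduction;
//     reduction.configure(&src, &dst, 0U, ReductionOperation::MEAN_SUM, /* keep_dims = */ true);
//     src.allocator()->allocate();
//     dst.allocator()->allocate();
//     // ... fill src with the test values, then:
//     reduction.run();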
TEST_SUITE(Float)
TEST_SUITE(FP16)
FIXTURE_DATA_TEST_CASE(RunSmall4D, CLReductionOperationFixture<half>, framework::DatasetMode::PRECOMMIT,
                       combine(combine(combine(combine(datasets::Small4DShapes(), framework::dataset::make("DataType", DataType::F16)), framework::dataset::make("Axis", { 0, 1, 2, 3 })),
                                       concat(ReductionOperationsSumProdMean,
                                              ReductionOperationsMinMax)),
                               KeepDimensions))
{
    // Validate output
    validate(CLAccessor(_target), _reference, tolerance_f16);
}
FIXTURE_DATA_TEST_CASE(RunLarge, CLReductionOperationFixture<half>, framework::DatasetMode::NIGHTLY,
                       combine(combine(combine(combine(datasets::LargeShapes(), framework::dataset::make("DataType", DataType::F16)), framework::dataset::make("Axis", { 0, 1, 2, 3 })),
                                       concat(ReductionOperationsSumProdMean,
                                              ReductionOperationsMinMax)),
                               KeepDimensions))
{
    // Validate output
    validate(CLAccessor(_target), _reference, rel_tolerance_f16, 0, tolerance_f16);
}
TEST_SUITE_END() // FP16
TEST_SUITE(FP32)
FIXTURE_DATA_TEST_CASE(RunSmall4D, CLReductionOperationFixture<float>, framework::DatasetMode::PRECOMMIT,
                       combine(combine(combine(combine(datasets::Small4DShapes(), framework::dataset::make("DataType", DataType::F32)), framework::dataset::make("Axis", { 0, 1, 2, 3 })),
                                       concat(ReductionOperationsSumProdMean,
                                              ReductionOperationsMinMax)),
                               KeepDimensions))
{
    // Validate output
    validate(CLAccessor(_target), _reference, tolerance_f32);
}
FIXTURE_DATA_TEST_CASE(RunLarge, CLReductionOperationFixture<float>, framework::DatasetMode::NIGHTLY,
                       combine(combine(combine(combine(datasets::LargeShapes(), framework::dataset::make("DataType", DataType::F32)), framework::dataset::make("Axis", { 0, 1, 2, 3 })),
                                       concat(ReductionOperationsSumProdMean,
                                              ReductionOperationsMinMax)),
                               KeepDimensions))
{
    // Validate output
    validate(CLAccessor(_target), _reference, rel_tolerance_f32, 0, tolerance_f32);
}
TEST_SUITE_END() // FP32
TEST_SUITE_END() // Float

template <typename T>
using CLReductionOperationQuantizedFixture = ReductionOperationQuantizedFixture<CLTensor, CLAccessor, CLReductionOperation, T>;

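// The quantized suites below build their inputs with QuantizationInfo(scale, offset), where a
// stored value q represents the real value scale * (q - offset). With QuantizationInfo(1.f / 64, 2),
// for example, a stored uint8 value of 130 represents (130 - 2) / 64 = 2.0f.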
TEST_SUITE(Quantized)
TEST_SUITE(QASYMM8)
FIXTURE_DATA_TEST_CASE(RunSmall, CLReductionOperationQuantizedFixture<uint8_t>, framework::DatasetMode::ALL,
                       combine(combine(combine(combine(combine(datasets::Small4DShapes(), framework::dataset::make("DataType", DataType::QASYMM8)), framework::dataset::make("Axis", { 0, 1, 2, 3 })),
                                               ReductionOperationsSumProdMean),
                                       framework::dataset::make("QuantizationInfo", QuantizationInfo(1.f / 64, 2))),
                               KeepDimensions))
{
    // Validate output
    validate(CLAccessor(_target), _reference, tolerance_qasymm8);
}
FIXTURE_DATA_TEST_CASE(RunSmallMinMax, CLReductionOperationQuantizedFixture<uint8_t>, framework::DatasetMode::ALL,
                       combine(combine(combine(combine(combine(datasets::Small4DShapes(), framework::dataset::make("DataType", DataType::QASYMM8)), framework::dataset::make("Axis", { 0, 1, 2, 3 })),
                                               ReductionOperationsMinMax),
                                       framework::dataset::make("QuantizationInfo", QuantizationInfo(1.f / 64, 2))),
                               KeepDimensions))
{
    // Validate output
    validate(CLAccessor(_target), _reference);
}
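// Note: the MIN/MAX runs above and below validate without a tolerance because min/max reductions
// select existing quantized values, so the result is expected to match the reference exactly.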
TEST_SUITE_END() // QASYMM8
TEST_SUITE(QASYMM8_SIGNED)
FIXTURE_DATA_TEST_CASE(RunSmall, CLReductionOperationQuantizedFixture<int8_t>, framework::DatasetMode::ALL,
                       combine(combine(combine(combine(combine(datasets::Small4DShapes(), framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)), framework::dataset::make("Axis", { 0, 1, 2, 3 })),
                                               ReductionOperationsSumProdMean),
                                       framework::dataset::make("QuantizationInfo", QuantizationInfo(1.f / 64, 2))),
                               KeepDimensions))
{
    // Validate output
    validate(CLAccessor(_target), _reference, tolerance_qasymm8);
}
FIXTURE_DATA_TEST_CASE(RunSmallMinMax, CLReductionOperationQuantizedFixture<int8_t>, framework::DatasetMode::ALL,
                       combine(combine(combine(combine(combine(datasets::Small4DShapes(), framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)), framework::dataset::make("Axis", { 0, 1, 2, 3 })),
                                               ReductionOperationsMinMax),
                                       framework::dataset::make("QuantizationInfo", QuantizationInfo(1.f / 64, 2))),
                               KeepDimensions))
{
    // Validate output
    validate(CLAccessor(_target), _reference);
}
TEST_SUITE_END() // QASYMM8_SIGNED
TEST_SUITE_END() // Quantized
TEST_SUITE_END() // Reduction
TEST_SUITE_END() // CL
} // namespace validation
} // namespace test
} // namespace arm_compute