Compute Library 22.05 — ReduceMean.cpp
/*
 * Copyright (c) 2018-2021 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/NEON/functions/NEReduceMean.h"
#include "arm_compute/runtime/Tensor.h"
#include "arm_compute/runtime/TensorAllocator.h"

#include "tests/NEON/Accessor.h"
#include "tests/datasets/ShapeDatasets.h"
#include "tests/datasets/SplitDataset.h"
#include "tests/framework/Asserts.h"
#include "tests/framework/Macros.h"
#include "tests/framework/datasets/Datasets.h"
#include "tests/validation/fixtures/ReduceMeanFixture.h"

namespace arm_compute
{
namespace test
{
namespace validation
{
namespace
{
constexpr AbsoluteTolerance<float> tolerance_f32(0.001f); /**< Tolerance value for comparing reference's output against implementation's output for 32-bit floating-point type */
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
constexpr AbsoluteTolerance<float> tolerance_f16(0.03f); /**< Tolerance value for comparing reference's output against implementation's output for 16-bit floating-point type */
#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
constexpr AbsoluteTolerance<uint8_t> tolerance_u8(1); /**< Tolerance value for comparing reference's output against implementation's output for unsigned 8-bit asymmetric quantized type */
constexpr AbsoluteTolerance<int8_t>  tolerance_s8(2); /**< Tolerance value for comparing reference's output against implementation's output for signed 8-bit asymmetric quantized type */

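// Axis/KeepDims combinations shared by the fixtures below: axis_keep reduces over the listed
// axes while keeping the reduced dimensions (as size 1) in the output, axis_drop removes them.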
const auto axis_keep = combine(framework::dataset::make("Axis", { Coordinates(0), Coordinates(1, 0), Coordinates(1, 2), Coordinates(0, 2), Coordinates(1, 3), Coordinates(0, 1, 2, 3) }),
                               framework::dataset::make("KeepDims", { true }));
const auto axis_drop = combine(framework::dataset::make("Axis", { Coordinates(0), Coordinates(1), Coordinates(3) }), framework::dataset::make("KeepDims", { false }));
} // namespace
TEST_SUITE(NEON)
TEST_SUITE(ReduceMean)

// *INDENT-OFF*
// clang-format off
DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(
               framework::dataset::make("InputInfo", { TensorInfo(TensorShape(27U, 3U, 16U, 2U), 1, DataType::F32),  // Invalid axis
                                                       TensorInfo(TensorShape(27U, 3U, 16U, 2U), 1, DataType::F32),  // Invalid output shape
                                                       TensorInfo(TensorShape(32U, 16U, 16U, 2U), 1, DataType::F32), // OK
                                                       TensorInfo(TensorShape{228U, 19U, 2U, 2U}, 1, DataType::F32), // OK
                                                       TensorInfo(TensorShape{228U, 19U, 2U, 1U}, 1, DataType::F32)  // Invalid: axis 3 exceeds the tensor's number of dimensions
                                                     }),
               framework::dataset::make("OutputInfo", { TensorInfo(TensorShape(27U, 3U, 1U, 2U), 1, DataType::F32),
                                                        TensorInfo(TensorShape(27U, 3U, 1U, 2U), 1, DataType::F32),
                                                        TensorInfo(TensorShape(32U, 16U, 1U, 2U), 1, DataType::F32),
                                                        TensorInfo(TensorShape(19U), 1, DataType::F32),
                                                        TensorInfo(TensorShape(19U), 1, DataType::F32)
                                                      })),
               framework::dataset::make("Axis", { Coordinates(4), Coordinates(0, 2), Coordinates(2), Coordinates(3, 2, 0), Coordinates(3, 2, 0) })),
               framework::dataset::make("Keep", { true, true, true, false, false })),
               framework::dataset::make("Expected", { false, false, true, true, false })),
               input_info, output_info, axis, keep, expected)
{
    const Status status = NEReduceMean::validate(&input_info.clone()->set_is_resizable(false), axis, keep, &output_info.clone()->set_is_resizable(false));
    ARM_COMPUTE_EXPECT(bool(status) == expected, framework::LogLevel::ERRORS);
}
// clang-format on
// *INDENT-ON*
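// For orientation, the function exercised by the fixtures below is typically driven like this at
// runtime (a minimal sketch for illustration only, not part of the original test; the shape and
// reduction axes here are arbitrary examples):
//
//   Tensor src{}, dst{};
//   src.allocator()->init(TensorInfo(TensorShape(27U, 3U, 16U, 2U), 1, DataType::F32));
//   NEReduceMean reduce_mean{};
//   reduce_mean.configure(&src, Coordinates(1, 2), /* keep_dims */ true, &dst);
//   src.allocator()->allocate();
//   dst.allocator()->allocate();
//   reduce_mean.run();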

template <typename T>
using NEReduceMeanFixture = ReduceMeanFixture<Tensor, Accessor, NEReduceMean, T>;
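// The fixture configures and runs NEReduceMean on randomly filled tensors, storing the result in
// _target and the reference implementation's output in _reference for comparison below.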

TEST_SUITE(Float)

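// FP16 cases are built only when the toolchain/target provides FP16 vector arithmetic.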
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
TEST_SUITE(FP16)
FIXTURE_DATA_TEST_CASE(RunSmall,
                       NEReduceMeanFixture<half>,
                       framework::DatasetMode::PRECOMMIT,
                       combine(combine(datasets::Small4DShapes(), framework::dataset::make("DataType", DataType::F16)), concat(axis_keep, axis_drop)))
{
    // Validate output
    validate(Accessor(_target), _reference, tolerance_f16);
}

FIXTURE_DATA_TEST_CASE(RunLarge,
                       NEReduceMeanFixture<half>,
                       framework::DatasetMode::NIGHTLY,
                       combine(combine(datasets::Large4DShapes(), framework::dataset::make("DataType", DataType::F16)), concat(axis_keep, axis_drop)))
{
    // Validate output
    validate(Accessor(_target), _reference, tolerance_f16);
}
TEST_SUITE_END() // FP16
#endif           // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
TEST_SUITE(FP32)
FIXTURE_DATA_TEST_CASE(RunSmall,
                       NEReduceMeanFixture<float>,
                       framework::DatasetMode::PRECOMMIT,
                       combine(combine(datasets::Small4DShapes(), framework::dataset::make("DataType", DataType::F32)), concat(axis_keep, axis_drop)))
{
    // Validate output
    validate(Accessor(_target), _reference, tolerance_f32);
}

FIXTURE_DATA_TEST_CASE(RunLarge,
                       NEReduceMeanFixture<float>,
                       framework::DatasetMode::NIGHTLY,
                       combine(combine(datasets::Large4DShapes(), framework::dataset::make("DataType", DataType::F32)), concat(axis_keep, axis_drop)))
{
    // Validate output
    validate(Accessor(_target), _reference, tolerance_f32);
}
TEST_SUITE_END() // FP32
TEST_SUITE_END() // Float

template <typename T>
using NEReduceMeanQuantizedFixture = ReduceMeanQuantizedFixture<Tensor, Accessor, NEReduceMean, T>;
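// The quantized fixture additionally takes the input and output QuantizationInfo, so the suites
// below can exercise both the same-quantization path and the requantizing path.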

TEST_SUITE(Quantized)
TEST_SUITE(QASYMM8)
FIXTURE_DATA_TEST_CASE(RunSmall,
                       NEReduceMeanQuantizedFixture<uint8_t>,
                       framework::DatasetMode::PRECOMMIT,
                       combine(combine(combine(combine(datasets::Small4DShapes(), framework::dataset::make("DataType", DataType::QASYMM8)), concat(axis_keep, axis_drop)),
                                       framework::dataset::make("QuantizationInfoInput", { QuantizationInfo(1.f / 255, 5) })),
                               framework::dataset::make("QuantizationInfoOutput", { QuantizationInfo(1.f / 255, 5) })))
{
    // Validate output
    validate(Accessor(_target), _reference, tolerance_u8);
}

TEST_SUITE(Requant)
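// Output quantization (1/200, offset 16) differs from the input quantization (1/255, offset 5),
// so the operator has to requantize the averaged values.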
FIXTURE_DATA_TEST_CASE(RunSmall,
                       NEReduceMeanQuantizedFixture<uint8_t>,
                       framework::DatasetMode::PRECOMMIT,
                       combine(combine(combine(combine(datasets::Small4DShapes(), framework::dataset::make("DataType", DataType::QASYMM8)), axis_drop),
                                       framework::dataset::make("QuantizationInfoInput", { QuantizationInfo(1.f / 255, 5) })),
                               framework::dataset::make("QuantizationInfoOutput", { QuantizationInfo(1.f / 200, 16) })))
{
    // Validate output
    validate(Accessor(_target), _reference, tolerance_u8);
}
TEST_SUITE_END() // Requant

FIXTURE_DATA_TEST_CASE(RunLarge,
                       NEReduceMeanQuantizedFixture<uint8_t>,
                       framework::DatasetMode::NIGHTLY,
                       combine(combine(combine(combine(datasets::Large4DShapes(), framework::dataset::make("DataType", DataType::QASYMM8)), concat(axis_keep, axis_drop)),
                                       framework::dataset::make("QuantizationInfoInput", { QuantizationInfo(1.f / 255, 5) })),
                               framework::dataset::make("QuantizationInfoOutput", { QuantizationInfo(1.f / 255, 5) })))
{
    // Validate output
    validate(Accessor(_target), _reference, tolerance_u8);
}
TEST_SUITE_END() // QASYMM8

TEST_SUITE(QASYMM8_SIGNED)
FIXTURE_DATA_TEST_CASE(RunSmall,
                       NEReduceMeanQuantizedFixture<int8_t>,
                       framework::DatasetMode::PRECOMMIT,
                       combine(combine(combine(combine(datasets::Small4DShapes(), framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)), concat(axis_keep, axis_drop)),
                                       framework::dataset::make("QuantizationInfoInput", { QuantizationInfo(1.f / 127, -10), QuantizationInfo(1.f / 250, -20) })),
                               framework::dataset::make("QuantizationInfoInputOutput", { QuantizationInfo(1.f / 127, -10) })))
{
    // Validate output
    validate(Accessor(_target), _reference, tolerance_s8);
}
TEST_SUITE(Requant)
FIXTURE_DATA_TEST_CASE(RunSmall,
                       NEReduceMeanQuantizedFixture<int8_t>,
                       framework::DatasetMode::PRECOMMIT,
                       combine(combine(combine(combine(datasets::Small4DShapes(), framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)), axis_drop),
                                       framework::dataset::make("QuantizationInfoInput", { QuantizationInfo(1.f / 102, 2) })),
                               framework::dataset::make("QuantizationInfoOutput", { QuantizationInfo(1.f / 113, 10) })))
{
    // Validate output
    validate(Accessor(_target), _reference, tolerance_s8);
}
TEST_SUITE_END() // Requant

FIXTURE_DATA_TEST_CASE(RunLarge,
                       NEReduceMeanQuantizedFixture<int8_t>,
                       framework::DatasetMode::NIGHTLY,
                       combine(combine(combine(combine(datasets::Large4DShapes(), framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)), concat(axis_keep, axis_drop)),
                                       framework::dataset::make("QuantizationInfoInput", { QuantizationInfo(1.f / 127, -10) })),
                               framework::dataset::make("QuantizationInfoInputOutput", { QuantizationInfo(1.f / 127, -10) })))
{
    // Validate output
    validate(Accessor(_target), _reference, tolerance_s8);
}
TEST_SUITE_END() // QASYMM8_SIGNED
TEST_SUITE_END() // Quantized
TEST_SUITE_END() // ReduceMean
TEST_SUITE_END() // NEON
} // namespace validation
} // namespace test
} // namespace arm_compute