BatchNormalizationLayer.cpp
/*
 * Copyright (c) 2017-2021 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/NEON/functions/NEBatchNormalizationLayer.h"
#include "arm_compute/runtime/NEON/functions/NEConvolutionLayer.h"
#include "arm_compute/runtime/NEON/functions/NEFuseBatchNormalization.h"
#include "arm_compute/runtime/Tensor.h"
#include "arm_compute/runtime/TensorAllocator.h"
#include "tests/NEON/Accessor.h"
#include "tests/PaddingCalculator.h"
#include "tests/datasets/RandomBatchNormalizationLayerDataset.h"
#include "tests/datasets/ShapeDatasets.h"
#include "tests/datasets/SmallConvolutionLayerDataset.h"
#include "tests/framework/Asserts.h"
#include "tests/framework/Macros.h"
#include "tests/framework/datasets/Datasets.h"
#include "tests/validation/Validation.h"
#include "tests/validation/fixtures/BatchNormalizationLayerFixture.h"
#include "tests/validation/fixtures/BatchNormalizationLayerFusionFixture.h"
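
// Validation tests for the Neon implementations of batch normalization
// (NEBatchNormalizationLayer) and of convolution/batch-normalization fusion
// (NEFuseBatchNormalization).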

namespace arm_compute
{
namespace test
{
namespace validation
{
namespace
{
RelativeTolerance<float>           rel_tolerance_f32(0.05f);   /**< Tolerance value for comparing reference's output against implementation's output for DataType::F32 */
constexpr AbsoluteTolerance<float> abs_tolerance_f32(0.0001f); /**< Tolerance value for comparing reference's output against implementation's output for DataType::F32 */
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
constexpr AbsoluteTolerance<float> abs_tolerance_f16(0.015f);  /**< Tolerance value for comparing reference's output against implementation's output for DataType::F16 */
#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC

const auto act_infos = framework::dataset::make("ActivationInfo",
{
    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 6.f),
    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 8.f, 2.f),
});
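// Every bias/beta/gamma on/off combination, crossed with a single epsilon value;
// shared by all fusion test cases below.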
const auto common_fusion_dataset = combine(combine(combine(framework::dataset::make("UseBias", { false, true }),
                                                           framework::dataset::make("UseBeta", { false, true })),
                                                   framework::dataset::make("UseGamma", { false, true })),
                                           framework::dataset::make("Epsilon", { 0.001f }));
} // namespace

TEST_SUITE(NEON)
TEST_SUITE(BatchNormalizationLayer)

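// Runs NEBatchNormalizationLayer on the target backend and compares its output
// against the reference implementation for element type T.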
template <typename T>
using NEBatchNormalizationLayerFixture = BatchNormalizationLayerValidationFixture<Tensor, Accessor, NEBatchNormalizationLayer, T>;

// *INDENT-OFF*
// clang-format off
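// Negative-path checks: each row pairs tensor metadata with the status that
// NEBatchNormalizationLayer::validate() is expected to return.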
DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(
               framework::dataset::make("InputInfo", { TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32),
                                                       TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), // Mismatching data types
                                                       TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), // Mismatching data types
                                                       TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), // Invalid mean/var/beta/gamma shape
                                                       TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), // Fused activation's a < b
                                                     }),
               framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32),
                                                       TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32),
                                                       TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F16),
                                                       TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32),
                                                       TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32),
                                                     })),
               framework::dataset::make("MVBGInfo",{ TensorInfo(TensorShape(2U), 1, DataType::F32),
                                                     TensorInfo(TensorShape(2U), 1, DataType::F16),
                                                     TensorInfo(TensorShape(2U), 1, DataType::F32),
                                                     TensorInfo(TensorShape(5U), 1, DataType::F32),
                                                     TensorInfo(TensorShape(2U), 1, DataType::F32),
                                                   })),
               framework::dataset::make("ActivationLayerInfo",{ ActivationLayerInfo(),
                                                     ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
                                                     ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
                                                     ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
                                                     ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 2.f, 6.f),
                                                   })),
               framework::dataset::make("Expected", { true, false, false, false, false})),
               input_info, output_info, mvbg_info, act_info, expected)
{
    const auto &mean_info  = mvbg_info;
    const auto &var_info   = mvbg_info;
    const auto &beta_info  = mvbg_info;
    const auto &gamma_info = mvbg_info;
    bool has_error = bool(NEBatchNormalizationLayer::validate(
            &input_info.clone()->set_is_resizable(false), output_info.total_size() ? &output_info.clone()->set_is_resizable(false) : nullptr,
            &mean_info.clone()->set_is_resizable(false), &var_info.clone()->set_is_resizable(false),
            &beta_info.clone()->set_is_resizable(false), &gamma_info.clone()->set_is_resizable(false), 1.f, act_info));
    ARM_COMPUTE_EXPECT(has_error == expected, framework::LogLevel::ERRORS);
}
// clang-format on
// *INDENT-ON*

TEST_SUITE(Float)
TEST_SUITE(FP32)
FIXTURE_DATA_TEST_CASE(RandomSmall, NEBatchNormalizationLayerFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(datasets::SmallRandomBatchNormalizationLayerDataset(),
                       combine(framework::dataset::make("UseBeta", { false, true }),
                               framework::dataset::make("UseGamma", { false, true }))),
                       act_infos),
                       framework::dataset::make("DataType", DataType::F32)),
                       framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
{
    // Validate output
    validate(Accessor(_target), _reference, abs_tolerance_f32, 0);
}

FIXTURE_DATA_TEST_CASE(RandomLarge, NEBatchNormalizationLayerFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(datasets::LargeRandomBatchNormalizationLayerDataset(),
                       combine(framework::dataset::make("UseBeta", { false, true }),
                               framework::dataset::make("UseGamma", { false, true }))),
                       act_infos),
                       framework::dataset::make("DataType", DataType::F32)),
                       framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
{
    // Validate output
    validate(Accessor(_target), _reference, abs_tolerance_f32, 0);
}
TEST_SUITE_END() // FP32

#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
TEST_SUITE(FP16)
FIXTURE_DATA_TEST_CASE(RandomSmall, NEBatchNormalizationLayerFixture<half>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(datasets::SmallRandomBatchNormalizationLayerDataset(),
                       combine(framework::dataset::make("UseBeta", { false, true }),
                               framework::dataset::make("UseGamma", { false, true }))),
                       framework::dataset::make("ActivationInfo", ActivationLayerInfo())),
                       framework::dataset::make("DataType", DataType::F16)),
                       framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
{
    // Validate output
    validate(Accessor(_target), _reference, abs_tolerance_f16, 0);
}

FIXTURE_DATA_TEST_CASE(RandomLarge, NEBatchNormalizationLayerFixture<half>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(datasets::LargeRandomBatchNormalizationLayerDataset(),
                       combine(framework::dataset::make("UseBeta", { false, true }),
                               framework::dataset::make("UseGamma", { false, true }))),
                       framework::dataset::make("ActivationInfo", ActivationLayerInfo())),
                       framework::dataset::make("DataType", DataType::F16)),
                       framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
{
    // Validate output
    validate(Accessor(_target), _reference, abs_tolerance_f16, 0);
}
TEST_SUITE_END() // FP16
#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
TEST_SUITE_END() // Float

TEST_SUITE_END() // BatchNormalizationLayer

TEST_SUITE(BatchNormalizationLayerFusion)
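// Folds the batch-normalization parameters into the preceding convolution's
// weights and bias via NEFuseBatchNormalization, then compares the fused result
// against the reference for element type T.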
template <typename T>
using NEBatchNormalizationLayerFusionFixture = BatchNormalizationLayerFusionValidationFixture<Tensor, Accessor, NEConvolutionLayer, NEFuseBatchNormalization, T>;

// *INDENT-OFF*
// clang-format off
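// Negative-path checks for NEFuseBatchNormalization::validate(), mirroring the
// batch-normalization checks above.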
DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(
               framework::dataset::make("Weights", { TensorInfo(TensorShape(32U, 13U, 2U, 2U), 1, DataType::F32), // Valid
                                                     TensorInfo(TensorShape(32U, 13U, 2U, 2U), 1, DataType::F32), // Mismatching data types
                                                     TensorInfo(TensorShape(32U, 13U, 2U, 2U), 1, DataType::F16), // Mismatching data types
                                                     TensorInfo(TensorShape(32U, 13U, 2U, 1U), 1, DataType::F32), // Invalid mean/var/beta/gamma shape
                                                   }),
               framework::dataset::make("MVBGInfo",{ TensorInfo(TensorShape(2U), 1, DataType::F32),
                                                     TensorInfo(TensorShape(2U), 1, DataType::F16),
                                                     TensorInfo(TensorShape(2U), 1, DataType::F32),
                                                     TensorInfo(TensorShape(5U), 1, DataType::F32),
                                                   })),
               framework::dataset::make("Expected", { true, false, false, false})),
               weights_info, mvbg_info, expected)
{
    const auto &weights_in_info    = weights_info;
    const auto &mean_info          = mvbg_info;
    const auto &var_info           = mvbg_info;
    const auto &fused_weights_info = weights_info;
    const auto &fused_bias_info    = mvbg_info;
    const auto &conv_bias_info     = mvbg_info;
    const auto &beta_info          = mvbg_info;
    const auto &gamma_info         = mvbg_info;
    bool has_error = bool(NEFuseBatchNormalization::validate(
            &weights_in_info.clone()->set_is_resizable(false), &mean_info.clone()->set_is_resizable(false),
            &var_info.clone()->set_is_resizable(false), &fused_weights_info.clone()->set_is_resizable(false),
            &fused_bias_info.clone()->set_is_resizable(false), &conv_bias_info.clone()->set_is_resizable(false),
            &beta_info.clone()->set_is_resizable(false), &gamma_info.clone()->set_is_resizable(false), 1.f));
    ARM_COMPUTE_EXPECT(has_error == expected, framework::LogLevel::ERRORS);
}
// clang-format on
// *INDENT-ON*

TEST_SUITE(Float)
TEST_SUITE(FP32)
FIXTURE_DATA_TEST_CASE(RunSmall, NEBatchNormalizationLayerFusionFixture<float>, framework::DatasetMode::PRECOMMIT,
                       combine(combine(combine(datasets::SmallConvolutionLayerDataset(), common_fusion_dataset),
                                       framework::dataset::make("DataType", DataType::F32)),
                               framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
{
    // Validate output
    validate(Accessor(_target), _reference, rel_tolerance_f32, 0.f, abs_tolerance_f32);
}
TEST_SUITE_END() // FP32
TEST_SUITE_END() // Float

TEST_SUITE_END() // BatchNormalizationLayerFusion
TEST_SUITE_END() // NEON
} // namespace validation
} // namespace test
} // namespace arm_compute