Compute Library
 20.08
BatchNormalizationLayer.cpp
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2017-2019 Arm Limited.
3  *
4  * SPDX-License-Identifier: MIT
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a copy
7  * of this software and associated documentation files (the "Software"), to
8  * deal in the Software without restriction, including without limitation the
9  * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10  * sell copies of the Software, and to permit persons to whom the Software is
11  * furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in all
14  * copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22  * SOFTWARE.
23  */
24 #include "arm_compute/core/Types.h"
30 #include "tests/CL/CLAccessor.h"
32 #include "tests/datasets/LargeConvolutionLayerDataset.h"
33 #include "tests/datasets/RandomBatchNormalizationLayerDataset.h"
34 #include "tests/datasets/SmallConvolutionLayerDataset.h"
36 #include "tests/framework/Macros.h"
40 #include "tests/validation/fixtures/BatchNormalizationLayerFixture.h"
41 #include "tests/validation/fixtures/BatchNormalizationLayerFusionFixture.h"
42 
43 namespace arm_compute
44 {
45 namespace test
46 {
47 namespace validation
48 {
// File-local test constants and dataset helpers for the CL BatchNormalizationLayer
// validation suite. Kept in an anonymous namespace so they do not leak outside
// this translation unit.
// NOTE(review): this listing is a lossy Doxygen extraction — the embedded line
// numbers jump from 55 to 58, so one or more ActivationInfo entries of
// `act_infos` are missing here (presumably the identity/RELU/BOUNDED_RELU
// variants); confirm against the original source before editing.
49 namespace
50 {
// Relative tolerance used when comparing CL output against the F32 reference.
51 RelativeTolerance<float> rel_tolerance_f32(0.05f); /**< Tolerance value for comparing reference's output against implementation's output for DataType::F32 */
// Absolute tolerance used when comparing CL output against the F32 reference.
52 constexpr AbsoluteTolerance<float> abs_tolerance_f32(0.0001f); /**< Tolerance value for comparing reference's output against implementation's output for DataType::F32 */
// Absolute tolerance used when comparing CL output against the F16 reference
// (looser than F32 because of half-precision rounding).
53 constexpr AbsoluteTolerance<float> tolerance_f16(0.01f); /**< Tolerance value for comparing reference's output against implementation's output for DataType::F16 */
// Activation functions to fuse into batch normalization in the Random tests.
54 const auto act_infos = framework::dataset::make("ActivationInfo",
55 {
58  ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 8.f, 2.f),
59 });
60 
// Cartesian product of the fusion-test knobs: with/without conv bias, with/without
// beta, with/without gamma, and a single epsilon value.
61 const auto common_fusion_dataset = combine(combine(combine(framework::dataset::make("UseBias", { false, true }),
62  framework::dataset::make("UseBeta", { false, true })),
63  framework::dataset::make("UseGamma", { false, true })),
64  framework::dataset::make("Epsilon", { 0.001f }));
65 } // namespace
66 
// Open the CL backend suite and the BatchNormalizationLayer sub-suite.
67 TEST_SUITE(CL)
68 TEST_SUITE(BatchNormalizationLayer)
69 
// Convenience alias: binds the generic batch-norm validation fixture to the CL
// tensor/accessor/function types, parameterised only on the element type T.
70 template <typename T>
71 using CLBatchNormalizationLayerFixture = BatchNormalizationLayerValidationFixture<CLTensor, CLAccessor, CLBatchNormalizationLayer, T>;
72 
// Configuration-only test: builds tensors for every combination of the small
// random batch-norm dataset with/without beta and gamma, configures the
// function, and validates the destination's valid region (no kernel execution).
// NOTE(review): lossy extraction — inner lines 76-77 (presumably the DataType
// and DataLayout datasets that feed `dt`/`data_layout`), line 81 (presumably
// `if(data_layout == DataLayout::NHWC)` guarding the permute) and line 102
// (presumably `validate(dst.info()->valid_region(), valid_region);`) are
// missing from this listing; confirm against the original source.
73 DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(combine(datasets::SmallRandomBatchNormalizationLayerDataset(),
74  combine(framework::dataset::make("UseBeta", { false, true }),
75  framework::dataset::make("UseGamma", { false, true }))),
78  shape0, shape1, epsilon, use_gamma, use_beta, dt, data_layout)
79 {
80  TensorShape src_dst_shapes = shape0;
82  {
// Move channels to the last dimension (CHW -> HWC) for the NHWC layout case.
83  permute(src_dst_shapes, PermutationVector(2U, 0U, 1U));
84  }
85 
86  // Create tensors
87  CLTensor src = create_tensor<CLTensor>(src_dst_shapes, dt, 1, QuantizationInfo(), data_layout);
88  CLTensor dst = create_tensor<CLTensor>(src_dst_shapes, dt, 1, QuantizationInfo(), data_layout);
// mean/var/beta/gamma are 1D per-channel tensors of shape `shape1`.
89  CLTensor mean = create_tensor<CLTensor>(shape1, dt, 1);
90  CLTensor var = create_tensor<CLTensor>(shape1, dt, 1);
91  CLTensor beta = create_tensor<CLTensor>(shape1, dt, 1);
92  CLTensor gamma = create_tensor<CLTensor>(shape1, dt, 1);
93 
94  // Create and Configure function
95  CLBatchNormalizationLayer norm;
// beta/gamma are optional: pass nullptr when the dataset disables them so the
// configure path for defaulted parameters is exercised too.
96  CLTensor *beta_ptr = use_beta ? &beta : nullptr;
97  CLTensor *gamma_ptr = use_gamma ? &gamma : nullptr;
98  norm.configure(&src, &dst, &mean, &var, beta_ptr, gamma_ptr, epsilon);
99 
100  // Validate valid region
101  const ValidRegion valid_region = shape_to_valid_region(src_dst_shapes);
103 }
104 
// Negative/positive validate() test: feeds CLBatchNormalizationLayer::validate
// with hand-crafted TensorInfo combinations and checks the returned Status
// against the "Expected" column. Row order matches the InputInfo comments:
// valid / window shrink / mismatching data types (x2) / invalid per-channel
// shape / unsupported fused activation / activation with a < b.
// NOTE(review): lossy extraction — the DATA_TEST_CASE macro header (inner line
// 107), the MVBGInfo dataset rows (124-130), most ActivationLayerInfo rows
// (133-137), the case parameter list (141) and the final
// ARM_COMPUTE_EXPECT(has_error == expected, ...) check (148) are missing from
// this listing; confirm against the original source before editing.
105 // *INDENT-OFF*
106 // clang-format off
108  framework::dataset::make("InputInfo", { TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32),
109  TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Window shrink
110  TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), // Mismatching data types
111  TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), // Mismatching data types
112  TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), // Invalid mean/var/beta/gamma shape
113  TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), // Unsupported fused activation
114  TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), // Fused activation's a < b
115  }),
116  framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32),
117  TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32),
118  TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32),
119  TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F16),
120  TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32),
121  TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32),
122  TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32),
123  })),
131  })),
132  framework::dataset::make("ActivationLayerInfo",{ ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
138  ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 2.f, 6.f),
139  })),
// Only the first combination is expected to validate successfully.
140  framework::dataset::make("Expected", { true, false, false, false, false, false, false})),
142 {
// The single MVBGInfo entry is reused for mean, variance, beta and gamma.
143  const auto &mean_info = mvbg_info;
144  const auto &var_info = mvbg_info;
145  const auto &beta_info = mvbg_info;
146  const auto &gamma_info = mvbg_info;
// A zero-sized OutputInfo means "in-place": pass nullptr as the output.
147  bool has_error = bool(CLBatchNormalizationLayer::validate(&input_info.clone()->set_is_resizable(false), (output_info.total_size() == 0) ? nullptr : &output_info.clone()->set_is_resizable(false), &mean_info.clone()->set_is_resizable(false), &var_info.clone()->set_is_resizable(false), &beta_info.clone()->set_is_resizable(false), &gamma_info.clone()->set_is_resizable(false), 1.f, act_info));
149 }
150 // clang-format on
151 // *INDENT-ON*
152 
// FP32 execution test: runs the CL batch normalization over the small random
// dataset (with/without beta and gamma, fused activations from act_infos) and
// compares against the C++ reference within abs_tolerance_f32.
// NOTE(review): lossy extraction — inner lines 159-160 (presumably the
// DataType::F32 and DataLayout datasets closing the combine chain and the
// macro's argument list) are missing from this listing.
153 TEST_SUITE(Float)
154 TEST_SUITE(FP32)
155 FIXTURE_DATA_TEST_CASE(Random, CLBatchNormalizationLayerFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(datasets::SmallRandomBatchNormalizationLayerDataset(),
156  combine(framework::dataset::make("UseBeta", { false, true }),
157  framework::dataset::make("UseGamma", { false, true }))),
158  act_infos),
161 {
162  // Validate output
163  validate(CLAccessor(_target), _reference, abs_tolerance_f32, 0);
164 }
165 TEST_SUITE_END() //FP32
166 
// FP16 execution test: same shape/beta/gamma sweep as the FP32 case but with
// half-precision tensors and the looser tolerance_f16.
// NOTE(review): lossy extraction — inner lines 171-173 (presumably the
// activation dataset, DataType::F16 and DataLayout datasets plus the macro's
// closing argument list) are missing from this listing.
167 TEST_SUITE(FP16)
168 FIXTURE_DATA_TEST_CASE(Random, CLBatchNormalizationLayerFixture<half>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(datasets::SmallRandomBatchNormalizationLayerDataset(),
169  combine(framework::dataset::make("UseBeta", { false, true }),
170  framework::dataset::make("UseGamma", { false, true }))),
174 {
175  // Validate output
176  validate(CLAccessor(_target), _reference, tolerance_f16, 0);
177 }
178 TEST_SUITE_END() // FP16
179 TEST_SUITE_END() // Float
180 
181 TEST_SUITE_END() // BatchNormalizationLayer
182 
// Fusion validate() test: checks CLFuseBatchNormalization::validate against
// three weight/MVBG combinations — valid / mismatching data types (F32 weights
// with F16 per-channel tensors) / per-channel shape not matching the weights'
// batch dimension. Only the first row is expected to succeed.
// NOTE(review): lossy extraction — inner lines 196 (presumably the case
// parameter list `weights_info, mvbg_info, expected)`), 201 (presumably
// `const auto &fused_weights_info = weights_info;`), 206 (presumably the
// `bool has_error = bool(CLFuseBatchNormalization::validate(` opener) and 211
// (presumably `ARM_COMPUTE_EXPECT(has_error == expected, ...)`), are missing
// from this listing; confirm against the original source.
183 TEST_SUITE(BatchNormalizationLayerFusion)
184 // *INDENT-OFF*
185 // clang-format off
186 DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(
187  framework::dataset::make("Weights", { TensorInfo(TensorShape(32U, 13U, 2U, 2U), 1, DataType::F32), // Valid
188  TensorInfo(TensorShape(32U, 13U, 2U, 2U), 1, DataType::F32), // Mismatching data types
189  TensorInfo(TensorShape(32U, 13U, 2U, 1U), 1, DataType::F32), // Invalid mean/var/beta/gamma shape
190  }),
191  framework::dataset::make("MVBGInfo",{ TensorInfo(TensorShape(2U), 1, DataType::F32),
192  TensorInfo(TensorShape(2U), 1, DataType::F16),
193  TensorInfo(TensorShape(5U), 1, DataType::F32),
194  })),
195  framework::dataset::make("Expected", { true, false, false})),
197 {
// The single MVBGInfo entry doubles for every per-channel tensor and bias.
198  const auto &weights_in_info = weights_info;
199  const auto &mean_info = mvbg_info;
200  const auto &var_info = mvbg_info;
202  const auto &fused_bias_info = mvbg_info;
203  const auto &conv_bias_info = mvbg_info;
204  const auto &beta_info = mvbg_info;
205  const auto &gamma_info = mvbg_info;
207  &weights_in_info.clone()->set_is_resizable(false), &mean_info.clone()->set_is_resizable(false),
208  &var_info.clone()->set_is_resizable(false), &fused_weights_info.clone()->set_is_resizable(false),
209  &fused_bias_info.clone()->set_is_resizable(false), &conv_bias_info.clone()->set_is_resizable(false),
210  &beta_info.clone()->set_is_resizable(false), &gamma_info.clone()->set_is_resizable(false), 1.f));
212 }
213 // clang-format on
214 // *INDENT-ON*
// Convenience alias: binds the generic conv+batch-norm fusion validation
// fixture to the CL convolution and fuse-batch-normalization functions,
// parameterised only on the element type T.
215 template <typename T>
216 using CLBatchNormalizationLayerFusionFixture = BatchNormalizationLayerFusionValidationFixture<CLTensor, CLAccessor, CLConvolutionLayer, CLFuseBatchNormalization, T>;
217 
// FP32 fusion execution tests: run convolution with batch-norm folded into the
// weights/bias over the small (reduced, precommit) and larger (nightly,
// presumably) convolution datasets crossed with common_fusion_dataset, and
// compare against the unfused reference using both relative and absolute F32
// tolerances.
// NOTE(review): lossy extraction — inner line 220 (presumably the
// FIXTURE_DATA_TEST_CASE(RunSmall, ...) macro header), 228 (presumably the
// FIXTURE_DATA_TEST_CASE(RunLarge, ..., DatasetMode::NIGHTLY, ...) header) and
// 230-231 (presumably RunLarge's DataType/DataLayout datasets) are missing
// from this listing; confirm against the original source.
218 TEST_SUITE(Float)
219 TEST_SUITE(FP32)
221  combine(combine(combine(datasets::SmallConvolutionLayerReducedDataset(), common_fusion_dataset),
222  framework::dataset::make("DataType", DataType::F32)),
223  framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
224 {
225  // Validate output
226  validate(CLAccessor(_target), _reference, rel_tolerance_f32, 0.f, abs_tolerance_f32);
227 }
229  combine(combine(combine(datasets::SmallConvolutionLayerDataset(), common_fusion_dataset),
232 {
233  // Validate output
234  validate(CLAccessor(_target), _reference, rel_tolerance_f32, 0.f, abs_tolerance_f32);
235 }
236 TEST_SUITE_END() // FP32
237 TEST_SUITE_END() // Float
238 
239 TEST_SUITE_END() // BatchNormalizationLayerFusion
240 TEST_SUITE_END() // CL
241 } // namespace validation
242 } // namespace test
243 } // namespace arm_compute
Shape of a tensor.
Definition: TensorShape.h:39
const DataLayout data_layout
Definition: Im2Col.cpp:146
TensorInfo * info() const override
Interface to be implemented by the child class to return the tensor's metadata.
Definition: CLTensor.cpp:41
ValidRegion valid_region() const override
Valid region of the tensor.
Definition: TensorInfo.h:303
BatchNormalizationLayerValidationFixture< CLTensor, CLAccessor, CLBatchNormalizationLayer, T > CLBatchNormalizationLayerFixture
half_float::half half
16-bit floating point type
Definition: Types.h:46
1 channel, 1 F32 per channel
ARM_COMPUTE_EXPECT(has_error==expected, framework::LogLevel::ERRORS)
Strides PermutationVector
Permutation vector.
Definition: Types.h:49
std::enable_if< is_container< T >::value, ContainerDataset< T > >::type make(std::string name, T &&values)
Helper function to create a ContainerDataset.
Activation Layer Information class.
Definition: Types.h:1517
static Status validate(const ITensorInfo *input_weights, const ITensorInfo *bn_mean, const ITensorInfo *bn_var, const ITensorInfo *fused_weights, const ITensorInfo *fused_bias, const ITensorInfo *input_bias=nullptr, const ITensorInfo *bn_beta=nullptr, const ITensorInfo *bn_gamma=nullptr, float epsilon=0.001f, FuseBatchNormalizationType fbn_type=FuseBatchNormalizationType::CONVOLUTION)
Static function to check if given info will lead to a valid configuration of CLFuseBatchNormalization...
Copyright (c) 2017-2020 Arm Limited.
1 channel, 1 F16 per channel
void permute(Dimensions< T > &dimensions, const PermutationVector &perm)
Permutes given Dimensions according to a permutation vector.
Definition: Helpers.h:605
DatasetMode
Possible dataset modes.
Definition: DatasetModes.h:40
TEST_SUITE_END() FIXTURE_DATA_TEST_CASE(RunSmall
[CLActivationLayer Test snippet]
Accessor implementation for CLTensor objects.
Definition: CLAccessor.h:35
Num samples, channels, height, width.
FIXTURE_DATA_TEST_CASE(RunSmall, CLAbsLayerFixture< half >, framework::DatasetMode::PRECOMMIT, combine(datasets::SmallShapes(), framework::dataset::make("DataType", DataType::F16)))
Definition: AbsLayer.cpp:50
validate(dst.info() ->valid_region(), valid_region)
Num samples, height, width, channels.
Store the tensor's metadata.
Definition: TensorInfo.h:45
static Status validate(const ITensorInfo *input, const ITensorInfo *output, const ITensorInfo *mean, const ITensorInfo *var, const ITensorInfo *beta=nullptr, const ITensorInfo *gamma=nullptr, float epsilon=0.001f, ActivationLayerInfo act_info=ActivationLayerInfo())
Static function to check if given info will lead to a valid configuration of CLBatchNormalizationLaye...
RelativeTolerance< half_float::half > tolerance_f16(half(0.2))
F16 Tolerance value for comparing reference's output against implementation's output for floating poi...
zip(zip(framework::dataset::make("Weights", { TensorInfo(TensorShape(32U, 13U, 2U, 2U), 1, DataType::F32), TensorInfo(TensorShape(32U, 13U, 2U, 2U), 1, DataType::F32), TensorInfo(TensorShape(32U, 13U, 2U, 1U), 1, DataType::F32), }), framework::dataset::make("MVBGInfo",{ TensorInfo(TensorShape(2U), 1, DataType::F32), TensorInfo(TensorShape(2U), 1, DataType::F16), TensorInfo(TensorShape(5U), 1, DataType::F32), })), framework::dataset::make("Expected", { true, false, false}))
DataType
Available data types.
Definition: Types.h:77
constexpr float abs_tolerance_f32(0.0001f)
F32 Absolute tolerance value for comparing reference's output against implementation's output for flo...
DataLayout
[DataLayout enum definition]
Definition: Types.h:120
BatchNormalizationLayerFusionValidationFixture< CLTensor, CLAccessor, CLConvolutionLayer, CLFuseBatchNormalization, T > CLBatchNormalizationLayerFusionFixture
combine(datasets::SmallShapes(), framework::dataset::make("DataType", DataType::F32)))
Definition: AbsLayer.cpp:65
TEST_SUITE(U8_to_S8) DATA_TEST_CASE(Configuration
ValidRegion shape_to_valid_region(const TensorShape &a_shape, bool border_undefined=false, BorderSize border_size=BorderSize(0))
Create a valid region based on tensor shape, border mode and border size.
Definition: Utils.h:225
cast configure & src
Definition: Cast.cpp:169
DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(datasets::SmallShapes(), AbsoluteDifferenceU8Dataset), shape, data_type0, data_type1, output_data_type)