Compute Library
 23.11
InstanceNormalizationLayer.cpp
/*
 * Copyright (c) 2019-2021 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/NEON/functions/NEInstanceNormalizationLayer.h"
#include "arm_compute/runtime/Tensor.h"
#include "arm_compute/runtime/TensorAllocator.h"
#include "tests/NEON/Accessor.h"
#include "tests/PaddingCalculator.h"
#include "tests/datasets/ShapeDatasets.h"
#include "tests/framework/Asserts.h"
#include "tests/framework/Macros.h"
#include "tests/framework/datasets/Datasets.h"
#include "tests/validation/Validation.h"
#include "tests/validation/fixtures/InstanceNormalizationLayerFixture.h"

namespace arm_compute
{
namespace test
{
namespace validation
{
namespace
{
/** Tolerance for float operations */
AbsoluteTolerance<float> tolerance_f32(0.0015f);
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
// This precision is chosen based on the precision float16_t can provide for
// the decimal numbers between 16 and 32, and was settled on after multiple
// test runs. However, with randomly generated numbers there is no guarantee
// that this tolerance will always be large enough.
AbsoluteTolerance<half> tolerance_f16(static_cast<half>(0.015625f));
#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
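// Worked example (editorial note, not in the original file): half has a
// 10-bit mantissa, so for values in [16, 32) (binary exponent 4) adjacent
// representable numbers are spaced 2^(4 - 10) = 2^-6 = 0.015625 apart.
// The absolute tolerance above therefore allows an error of one float16 ULP
// in that range.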
} // namespace

TEST_SUITE(NEON)
TEST_SUITE(InstanceNormalizationLayer)

// *INDENT-OFF*
// clang-format off
DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(
    framework::dataset::make("InputInfo", { TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::F32), // Mismatching data type input/output
                                            TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::F32), // Mismatching shape input/output
                                            TensorInfo(TensorShape(128U, 64U, 32U, 4U), 2, DataType::F32), // Number of Input channels != 1
                                            TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::S16), // DataType != F32
                                            TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::F32, DataLayout::NCHW),
                                            TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::F32, DataLayout::NHWC),
                                            TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::F32),
                                            TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::F32),
                                            TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::F32),
                                            TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::F32)
                                          }),
    framework::dataset::make("OutputInfo", { TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::F16),
                                             TensorInfo(TensorShape(256U, 64U, 32U, 4U), 1, DataType::F32),
                                             TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::F32),
                                             TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::S16),
                                             TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::F32, DataLayout::NCHW),
                                             TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::F32, DataLayout::NHWC),
                                             TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::F32),
                                             TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::F32),
                                             TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::F32),
                                             TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::F32)
                                           })),
    framework::dataset::make("Expected", { false, false, false, false, true, true, true, true, true, true })),
    input_info, output_info, expected)
{
    bool is_valid = bool(NEInstanceNormalizationLayer::validate(&input_info.clone()->set_is_resizable(false),
                                                                &output_info.clone()->set_is_resizable(false)));
    ARM_COMPUTE_EXPECT(is_valid == expected, framework::LogLevel::ERRORS);
}
// clang-format on
// *INDENT-ON*
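// Illustrative note (not part of the original test): the same static validate()
// helper can be called directly, before configuring the function, to reject an
// unsupported configuration up front. A minimal sketch, using the F32 NHWC
// shape exercised above and the default gamma/beta/epsilon parameters:
//
//   TensorInfo in(TensorShape(128U, 64U, 32U, 4U), 1, DataType::F32, DataLayout::NHWC);
//   TensorInfo out(TensorShape(128U, 64U, 32U, 4U), 1, DataType::F32, DataLayout::NHWC);
//   Status status = NEInstanceNormalizationLayer::validate(&in, &out);
//   bool ok = bool(status); // true for this configuration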

template <typename T>
using NEInstanceNormalizationLayerFixture = InstanceNormalizationLayerValidationFixture<Tensor, Accessor, NEInstanceNormalizationLayer, T>;

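// Descriptive note (added for this page): the fixture runs the NEON function to
// produce _target and a reference implementation to produce _reference; the test
// cases below compare the two within the tolerances defined at the top of the file.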
TEST_SUITE(FP32)
FIXTURE_DATA_TEST_CASE(RunSmall, NEInstanceNormalizationLayerFixture<float>, framework::DatasetMode::PRECOMMIT,
                       combine(combine(combine(datasets::Small4DShapes(),
                                               framework::dataset::make("DataType", DataType::F32)),
                                       framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
                               framework::dataset::make("InPlace", { false, true })))
{
    // Validate output
    validate(Accessor(_target), _reference, tolerance_f32);
}

TEST_SUITE_END() // FP32

#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
TEST_SUITE(FP16)
FIXTURE_DATA_TEST_CASE(RunSmall, NEInstanceNormalizationLayerFixture<half>, framework::DatasetMode::PRECOMMIT,
                       combine(combine(combine(datasets::SmallShapes(),
                                               framework::dataset::make("DataType", DataType::F16)),
                                       framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
                               framework::dataset::make("InPlace", { false, true })))
{
    // Validate output
    validate(Accessor(_target), _reference, tolerance_f16);
}
TEST_SUITE_END() // FP16
#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC

TEST_SUITE_END() // InstanceNormalizationLayer
TEST_SUITE_END() // Neon
} // namespace validation
} // namespace test
} // namespace arm_compute
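
The validation fixture above hides the plain runtime use of the function under test. The following is a minimal sketch, not taken from this file, of how NEInstanceNormalizationLayer is typically validated, configured, and run; the tensor shape and the reliance on the default gamma, beta, and epsilon parameters are assumptions made for illustration.

    #include "arm_compute/core/Types.h"
    #include "arm_compute/runtime/NEON/functions/NEInstanceNormalizationLayer.h"
    #include "arm_compute/runtime/Tensor.h"

    using namespace arm_compute;

    int main()
    {
        // Describe a small NCHW F32 input and an output of the same shape.
        Tensor src, dst;
        src.allocator()->init(TensorInfo(TensorShape(8U, 8U, 3U, 1U), 1, DataType::F32, DataLayout::NCHW));
        dst.allocator()->init(TensorInfo(TensorShape(8U, 8U, 3U, 1U), 1, DataType::F32, DataLayout::NCHW));

        // Check the configuration first, as the Validate test case does.
        if(!bool(NEInstanceNormalizationLayer::validate(src.info(), dst.info())))
        {
            return 1;
        }

        // Configure with the defaults: gamma = 1.0f, beta = 0.0f, epsilon = 1e-12f.
        NEInstanceNormalizationLayer norm;
        norm.configure(&src, &dst);

        // Allocate backing memory, fill src (omitted here), then execute.
        src.allocator()->allocate();
        dst.allocator()->allocate();
        norm.run();

        return 0;
    }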