Compute Library 23.05
ActivationLayer.cpp
/*
 * Copyright (c) 2017-2020, 2022 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/CL/CLTensor.h"
#include "arm_compute/runtime/CL/CLTensorAllocator.h"
#include "arm_compute/runtime/CL/functions/CLActivationLayer.h"
#include "tests/CL/CLAccessor.h"
#include "tests/datasets/ActivationFunctionsDataset.h"
#include "tests/datasets/ShapeDatasets.h"
#include "tests/framework/Asserts.h"
#include "tests/framework/Macros.h"
#include "tests/framework/datasets/Datasets.h"
#include "tests/validation/Validation.h"
#include "tests/validation/fixtures/ActivationLayerFixture.h"

namespace arm_compute
{
namespace test
{
namespace validation
{
namespace
{
constexpr AbsoluteTolerance<float> tolerance_qsymm16(1.f);

/** Define tolerance of the activation layer.
 *
 * @param[in] activation The activation function used.
 * @param[in] data_type  Data type.
 *
 * @return Tolerance depending on the activation function.
 */
AbsoluteTolerance<float> tolerance(ActivationLayerInfo::ActivationFunction activation, DataType data_type)
{
    constexpr float epsilon = 1e-6f;

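    // The F16 thresholds below are deliberately looser than the F32 ones: half precision
    // carries only a 10-bit fraction (roughly 3 decimal digits), so transcendental
    // activations such as LOGISTIC and TANH accumulate noticeably more rounding error.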
    switch(activation)
    {
        case ActivationLayerInfo::ActivationFunction::LINEAR:
            return AbsoluteTolerance<float>(data_type == DataType::F16 ? 0.2f : epsilon);
        case ActivationLayerInfo::ActivationFunction::SQUARE:
            return AbsoluteTolerance<float>(data_type == DataType::F16 ? 0.1f : epsilon);
        case ActivationLayerInfo::ActivationFunction::LOGISTIC:
            return AbsoluteTolerance<float>(data_type == DataType::F16 ? 0.001f : epsilon);
        case ActivationLayerInfo::ActivationFunction::LEAKY_RELU:
            return AbsoluteTolerance<float>(data_type == DataType::F16 ? 0.00001f : epsilon);
        case ActivationLayerInfo::ActivationFunction::ELU:
        case ActivationLayerInfo::ActivationFunction::GELU:
        case ActivationLayerInfo::ActivationFunction::SOFT_RELU:
        case ActivationLayerInfo::ActivationFunction::SQRT:
            return AbsoluteTolerance<float>(data_type == DataType::F16 ? 0.01f : 0.00001f);
        case ActivationLayerInfo::ActivationFunction::TANH:
            return AbsoluteTolerance<float>(data_type == DataType::F16 ? 0.001f : 0.00001f);
        case ActivationLayerInfo::ActivationFunction::HARD_SWISH:
            return AbsoluteTolerance<float>(data_type == DataType::F16 ? 0.01f : epsilon);
        default:
            return AbsoluteTolerance<float>(epsilon);
    }
}

/** CNN data types */
const auto CNNDataTypes = framework::dataset::make("DataType",
{
    DataType::F16,
    DataType::F32,
});

/** Input data sets. */
const auto ActivationDataset = combine(combine(framework::dataset::make("InPlace", { false, true }), datasets::ActivationFunctions()), framework::dataset::make("AlphaBeta", { 0.5f, 1.f }));
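// ActivationDataset is the Cartesian product of the three sub-datasets above: every test
// shape is run both in-place and out-of-place, for each activation function in
// datasets::ActivationFunctions(), with alpha/beta set to 0.5 and 1.0.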

} // namespace

TEST_SUITE(CL)
TEST_SUITE(ActivationLayer)
// *INDENT-OFF*
// clang-format off
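// Validate checks CLActivationLayer::validate() against hand-picked input/output/activation
// combinations: the rows expected to fail cover mismatching data types, invalid quantization
// info, mismatching shapes and an activation (SQRT) that is unsupported for QSYMM16.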
DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
    framework::dataset::make("InputInfo", { TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32),     // Mismatching data types
                                            TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32),
                                            TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32),
                                            TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QASYMM8),
                                            TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QASYMM8), // Invalid quantization info
                                            TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32),     // Mismatching shapes
                                            TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QSYMM16),
                                            TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QSYMM16),
                                            TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QSYMM16), // Invalid activation function for QSYMM16
                                          }),
    framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F16),
                                            TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32),
                                            TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32),
                                            TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QASYMM8),
                                            TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QASYMM8),
                                            TensorInfo(TensorShape(30U, 11U, 2U), 1, DataType::F32),
                                            TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QSYMM16, QuantizationInfo(1.f / 32768.f, 0)),
                                            TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QSYMM16, QuantizationInfo(1.f / 32768.f, 0)),
                                            TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QSYMM16, QuantizationInfo(1.f / 32768.f, 0)),
                                          })),
    framework::dataset::make("ActivationInfo", { ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
                                                 ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
                                                 ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
                                                 ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU),
                                                 ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::TANH),
                                                 ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
                                                 ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::TANH),
                                                 ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LOGISTIC),
                                                 ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::SQRT),
                                               })),
    framework::dataset::make("Expected", { false, true, true, true, false, false, true, true, false })),
    input_info, output_info, act_info, expected)
{
    ARM_COMPUTE_EXPECT(bool(CLActivationLayer::validate(&input_info.clone()->set_is_resizable(false), (output_info.total_size() == 0) ? nullptr : &output_info.clone()->set_is_resizable(false), act_info)) == expected, framework::LogLevel::ERRORS);
}

// clang-format on
// *INDENT-ON*

/** [CLActivationLayerFixture snippet] **/
template <typename T>
using CLActivationLayerFixture = ActivationValidationFixture<CLTensor, CLAccessor, CLActivationLayer, T>;
/** [CLActivationLayerFixture snippet] **/
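// ActivationValidationFixture runs CLActivationLayer on generated inputs (_target) and computes
// the same activation with the C++ reference implementation (_reference); the test cases below
// compare the two using the tolerance() helper defined above.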

TEST_SUITE(Float)
TEST_SUITE(FP16)
/** [CLActivationLayer Test snippet] **/
FIXTURE_DATA_TEST_CASE(RunSmall, CLActivationLayerFixture<half>, framework::DatasetMode::ALL, combine(combine(datasets::SmallShapes(), ActivationDataset),
                                                                                                      framework::dataset::make("DataType",
                                                                                                                               DataType::F16)))
{
    // Validate output
    validate(CLAccessor(_target), _reference, tolerance(_function, _data_type));
}
/** [CLActivationLayer Test snippet] **/
TEST_SUITE_END() // FP16

TEST_SUITE(FP32)
FIXTURE_DATA_TEST_CASE(RunSmall, CLActivationLayerFixture<float>, framework::DatasetMode::ALL, combine(combine(datasets::SmallShapes(), ActivationDataset),
                                                                                                       framework::dataset::make("DataType",
                                                                                                                                DataType::F32)))
{
    // Validate output
    validate(CLAccessor(_target), _reference, tolerance(_function, _data_type));
}
TEST_SUITE_END() // FP32
TEST_SUITE_END() // Float

template <typename T>
using CLActivationLayerQuantizedFixture = ActivationValidationQuantizedFixture<CLTensor, CLAccessor, CLActivationLayer, T>;

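// Quantized input datasets: the 8-bit set extends the common quantized activation functions
// via concat(), the 16-bit set uses them unchanged; both fix alpha/beta at 0.5 and 1.0.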
const auto QuantizedActivationDataset8 = combine(combine(framework::dataset::make("InPlace", { false }),
                                                         concat(datasets::ActivationFunctionsQuantized(),
                                                                framework::dataset::make("ActivationFunction", ActivationLayerInfo::ActivationFunction::HARD_SWISH))),
                                                 framework::dataset::make("AlphaBeta", { 0.5f, 1.f }));

const auto QuantizedActivationDataset16 = combine(combine(framework::dataset::make("InPlace", { false }),
                                                          datasets::ActivationFunctionsQuantized()),
                                                  framework::dataset::make("AlphaBeta", { 0.5f, 1.f }));

TEST_SUITE(Quantized)
TEST_SUITE(QASYMM8)
FIXTURE_DATA_TEST_CASE(RunSmall, CLActivationLayerQuantizedFixture<uint8_t>, framework::DatasetMode::ALL, combine(combine(combine(datasets::SmallShapes(), QuantizedActivationDataset8),
                                                                                                                   framework::dataset::make("DataType",
                                                                                                                                            DataType::QASYMM8)),
                                                                                                                   framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.1f, 128.0f) })))
{
    // Validate output
    validate(CLAccessor(_target), _reference, tolerance(_function, _data_type));
}
TEST_SUITE_END() // QASYMM8
TEST_SUITE(QASYMM8_SIGNED)
FIXTURE_DATA_TEST_CASE(RunSmall, CLActivationLayerQuantizedFixture<int8_t>, framework::DatasetMode::ALL, combine(combine(combine(datasets::SmallShapes(), QuantizedActivationDataset8),
                                                                                                                  framework::dataset::make("DataType",
                                                                                                                                           DataType::QASYMM8_SIGNED)),
                                                                                                                  framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.1f, 10.0f) })))
{
    // Validate output
    validate(CLAccessor(_target), _reference, tolerance(_function, _data_type));
}
TEST_SUITE_END() // QASYMM8_SIGNED
TEST_SUITE(QSYMM16)
FIXTURE_DATA_TEST_CASE(RunSmall, CLActivationLayerQuantizedFixture<int16_t>, framework::DatasetMode::ALL, combine(combine(combine(datasets::SmallShapes(), QuantizedActivationDataset16),
                                                                                                                   framework::dataset::make("DataType",
                                                                                                                                            DataType::QSYMM16)),
                                                                                                                   framework::dataset::make("QuantizationInfo", { QuantizationInfo(1.f / 32768.f, 0) })))
{
    // Validate output
    validate(CLAccessor(_target), _reference, tolerance_qsymm16);
}
TEST_SUITE_END() // QSYMM16
TEST_SUITE_END() // Quantized

TEST_SUITE_END() // ActivationLayer
TEST_SUITE_END() // CL
} // namespace validation
} // namespace test
} // namespace arm_compute