Compute Library 21.02
ArithmeticAddition.cpp
/*
 * Copyright (c) 2017-2021 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
24 #include "arm_compute/core/Types.h"
28 #include "tests/NEON/Accessor.h"
30 #include "tests/datasets/ConvertPolicyDataset.h"
31 #include "tests/datasets/ShapeDatasets.h"
33 #include "tests/framework/Macros.h"
36 #include "tests/validation/fixtures/ArithmeticOperationsFixture.h"

namespace arm_compute
{
namespace test
{
namespace validation
{
namespace
{
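// Tolerance rationale (assumption): the non-aarch64 and SVE code paths may round quantized results slightly
// differently from the reference, so those builds allow a one-LSB difference; aarch64 NEON is expected to match exactly.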
#if !defined(__aarch64__) || defined(__ARM_FEATURE_SVE)
constexpr AbsoluteTolerance<float> tolerance_quant(1); /**< Tolerance value for comparing reference's output against implementation's output for quantized data types */
#else  // !defined(__aarch64__) || defined(__ARM_FEATURE_SVE)
constexpr AbsoluteTolerance<float> tolerance_quant(0);
#endif // !defined(__aarch64__) || defined(__ARM_FEATURE_SVE)

/** Input data sets: (Input1, Input2, Output) data type triplets **/
const auto ArithmeticAdditionU8Dataset = combine(combine(framework::dataset::make("DataType", DataType::U8), framework::dataset::make("DataType", DataType::U8)), framework::dataset::make("DataType",
                                                 DataType::U8));
const auto ArithmeticAdditionS16Dataset = combine(combine(framework::dataset::make("DataType", { DataType::U8, DataType::S16 }), framework::dataset::make("DataType", DataType::S16)),
                                                  framework::dataset::make("DataType", DataType::S16));
const auto ArithmeticAdditionS32Dataset = combine(combine(framework::dataset::make("DataType", { DataType::S32 }), framework::dataset::make("DataType", DataType::S32)),
                                                  framework::dataset::make("DataType", DataType::S32));
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
const auto ArithmeticAdditionFP16Dataset = combine(combine(framework::dataset::make("DataType", DataType::F16), framework::dataset::make("DataType", DataType::F16)),
                                                   framework::dataset::make("DataType", DataType::F16));
#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
const auto ArithmeticAdditionFP32Dataset = combine(combine(framework::dataset::make("DataType", DataType::F32), framework::dataset::make("DataType", DataType::F32)),
                                                   framework::dataset::make("DataType", DataType::F32));
const auto ArithmeticAdditionQASYMM8Dataset = combine(combine(framework::dataset::make("DataType", DataType::QASYMM8), framework::dataset::make("DataType", DataType::QASYMM8)),
                                                      framework::dataset::make("DataType", DataType::QASYMM8));
const auto ArithmeticAdditionQASYMM8SIGNEDDataset = combine(combine(framework::dataset::make("DataType", DataType::QASYMM8_SIGNED), framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)),
                                                            framework::dataset::make("DataType", DataType::QASYMM8_SIGNED));
const auto ArithmeticAdditionQSYMM16Dataset = combine(combine(framework::dataset::make("DataType", DataType::QSYMM16), framework::dataset::make("DataType", DataType::QSYMM16)),
                                                      framework::dataset::make("DataType", DataType::QSYMM16));
} // namespace

TEST_SUITE(NEON)
TEST_SUITE(ArithmeticAddition)

template <typename T>
using NEArithmeticAdditionFixture = ArithmeticAdditionValidationFixture<Tensor, Accessor, NEArithmeticAddition, T>;

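// Static validation: each row below pairs input/output TensorInfos with the result expected from
// NEArithmeticAddition::validate(), covering supported cases and the commented failure reasons.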
// *INDENT-OFF*
// clang-format off
DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
               framework::dataset::make("Input1Info", { TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8),
                                                        TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8),
                                                        TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::U8), // Unsupported broadcast
                                                        TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8), // Invalid data type combination
                                                        TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32),// Mismatching shapes
                                                      }),
               framework::dataset::make("Input2Info",{ TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8),
                                                       TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8),
                                                       TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::S16),
                                                       TensorInfo(TensorShape(48U, 11U, 2U), 1, DataType::F32),
                                                     })),
               framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::S16),
                                                       TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8),
                                                       TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::S16),
                                                       TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8),
                                                       TensorInfo(TensorShape(48U, 11U, 2U), 1, DataType::F32),
                                                     })),
               framework::dataset::make("Expected", { true, true, false, false, false})),
               input1_info, input2_info, output_info, expected)
{
    Status s = NEArithmeticAddition::validate(&input1_info.clone()->set_is_resizable(false),
                                              &input2_info.clone()->set_is_resizable(false),
                                              &output_info.clone()->set_is_resizable(false),
                                              ConvertPolicy::WRAP);
    ARM_COMPUTE_EXPECT(bool(s) == expected, framework::LogLevel::ERRORS);
}
// clang-format on
// *INDENT-ON*

TEST_CASE(NoPaddingAdded, framework::DatasetMode::ALL)
{
    // NEArithmeticAddition doesn't use padding, so make sure this is the case.
    Tensor input1 = create_tensor<Tensor>(TensorShape(15U, 15U), DataType::F32);
    Tensor input2 = create_tensor<Tensor>(TensorShape(15U, 1U), DataType::F32);
    Tensor output = create_tensor<Tensor>(TensorShape(15U, 15U), DataType::F32);

    NEArithmeticAddition add;
    add.configure(&input1, &input2, &output, ConvertPolicy::WRAP);

    // Validate that padding is zero
    validate(input1.info()->padding(), PaddingSize());
    validate(input2.info()->padding(), PaddingSize());
    validate(output.info()->padding(), PaddingSize());
}

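// Fixture-based runs: each data-type suite below executes the operator on the target and compares the result
// against the reference implementation; small-shape runs belong to the precommit/all sets, large-shape runs to nightly.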
TEST_SUITE(Integer)
TEST_SUITE(U8)
FIXTURE_DATA_TEST_CASE(RunSmall, NEArithmeticAdditionFixture<uint8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallShapes(), ArithmeticAdditionU8Dataset),
                                                                                                                   framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })))
{
    // Validate output
    validate(Accessor(_target), _reference);
}
TEST_SUITE_END() // U8

TEST_SUITE(S16)
FIXTURE_DATA_TEST_CASE(RunSmall, NEArithmeticAdditionFixture<int16_t>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallShapes(), ArithmeticAdditionS16Dataset),
                                                                                                                   framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })))
{
    // Validate output
    validate(Accessor(_target), _reference);
}

FIXTURE_DATA_TEST_CASE(RunLarge, NEArithmeticAdditionFixture<int16_t>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::LargeShapes(), ArithmeticAdditionS16Dataset),
                                                                                                                 framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })))
{
    // Validate output
    validate(Accessor(_target), _reference);
}
TEST_SUITE_END() // S16

TEST_SUITE(S32)
FIXTURE_DATA_TEST_CASE(RunSmall, NEArithmeticAdditionFixture<int32_t>, framework::DatasetMode::ALL, combine(combine(datasets::SmallShapes(), ArithmeticAdditionS32Dataset),
                                                                                                             framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })))
{
    // Validate output
    validate(Accessor(_target), _reference);
}
TEST_SUITE_END() // S32
TEST_SUITE_END() // Integer

TEST_SUITE(Float)
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
TEST_SUITE(F16)
FIXTURE_DATA_TEST_CASE(RunSmall, NEArithmeticAdditionFixture<half>, framework::DatasetMode::ALL, combine(combine(datasets::SmallShapes(), ArithmeticAdditionFP16Dataset),
                                                                                                          framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })))
{
    // Validate output
    validate(Accessor(_target), _reference);
}
TEST_SUITE_END() // F16
#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */

TEST_SUITE(F32)
FIXTURE_DATA_TEST_CASE(RunSmall, NEArithmeticAdditionFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallShapes(), ArithmeticAdditionFP32Dataset),
                                                                                                                 framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })))
{
    // Validate output
    validate(Accessor(_target), _reference);
}

FIXTURE_DATA_TEST_CASE(RunLarge, NEArithmeticAdditionFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::LargeShapes(), ArithmeticAdditionFP32Dataset),
                                                                                                               framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })))
{
    // Validate output
    validate(Accessor(_target), _reference);
}

template <typename T>
using NEArithmeticAdditionBroadcastFixture = ArithmeticAdditionBroadcastValidationFixture<Tensor, Accessor, NEArithmeticAddition, T>;

FIXTURE_DATA_TEST_CASE(RunSmallBroadcast, NEArithmeticAdditionBroadcastFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallShapesBroadcast(),
                                                                                                                                           ArithmeticAdditionFP32Dataset),
                                                                                                                                   framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })))
{
    // Validate output
    validate(Accessor(_target), _reference);
}

FIXTURE_DATA_TEST_CASE(RunLargeBroadcast, NEArithmeticAdditionBroadcastFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::LargeShapesBroadcast(),
                                                                                                                                         ArithmeticAdditionFP32Dataset),
                                                                                                                                 framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })))
{
    // Validate output
    validate(Accessor(_target), _reference);
}
TEST_SUITE_END() // F32
TEST_SUITE_END() // Float

template <typename T>
using NEArithmeticAdditionQuantizedFixture = ArithmeticAdditionValidationQuantizedFixture<Tensor, Accessor, NEArithmeticAddition, T>;

template <typename T>
using NEArithmeticAdditionQuantizedBroadcastFixture = ArithmeticAdditionValidationQuantizedBroadcastFixture<Tensor, Accessor, NEArithmeticAddition, T>;

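// Note: on top of shape and convert policy, the quantized fixtures below also sweep the input/output
// QuantizationInfo, so requantization between different scales and offsets is exercised as well.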
TEST_SUITE(Quantized)
TEST_SUITE(QASYMM8)
FIXTURE_DATA_TEST_CASE(RunSmall,
                       NEArithmeticAdditionQuantizedFixture<uint8_t>,
                       framework::DatasetMode::PRECOMMIT,
                       combine(combine(combine(combine(combine(datasets::SmallShapes(), ArithmeticAdditionQASYMM8Dataset),
                                                               framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE })),
                                                       framework::dataset::make("Src0QInfo", { QuantizationInfo(5.f / 255.f, 20) })),
                                               framework::dataset::make("Src1QInfo", { QuantizationInfo(2.f / 255.f, 10) })),
                                       framework::dataset::make("OutQInfo", { QuantizationInfo(1.f / 255.f, 5) })))
{
    // Validate output
    validate(Accessor(_target), _reference, tolerance_quant);
}
TEST_SUITE_END() // QASYMM8

TEST_SUITE(QASYMM8_SIGNED)
FIXTURE_DATA_TEST_CASE(RunSmall,
                       NEArithmeticAdditionQuantizedFixture<int8_t>,
                       framework::DatasetMode::ALL,
                       combine(combine(combine(combine(combine(datasets::SmallShapes(), ArithmeticAdditionQASYMM8SIGNEDDataset),
                                                               framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE })),
                                                       framework::dataset::make("Src0QInfo", { QuantizationInfo(0.5f, 20) })),
                                               framework::dataset::make("Src1QInfo", { QuantizationInfo(0.5f, 10) })),
                                       framework::dataset::make("OutQInfo", { QuantizationInfo(0.5f, 5) })))
{
    // Validate output
    validate(Accessor(_target), _reference, tolerance_quant);
}

FIXTURE_DATA_TEST_CASE(RunSmallBroadcast, NEArithmeticAdditionQuantizedBroadcastFixture<int8_t>, framework::DatasetMode::ALL, combine(combine(combine(combine(combine(
                           datasets::SmallShapesBroadcast(), ArithmeticAdditionQASYMM8SIGNEDDataset),
                           framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE })),
                           framework::dataset::make("Src0QInfo", { QuantizationInfo(0.5f, 20) })),
                           framework::dataset::make("Src1QInfo", { QuantizationInfo(0.5f, 10) })),
                           framework::dataset::make("OutQInfo", { QuantizationInfo(0.5f, 5) })))
{
    // Validate output
    validate(Accessor(_target), _reference, tolerance_quant);
}
TEST_SUITE_END() // QASYMM8_SIGNED

TEST_SUITE(QSYMM16)
FIXTURE_DATA_TEST_CASE(RunSmall,
                       NEArithmeticAdditionQuantizedFixture<int16_t>,
                       framework::DatasetMode::PRECOMMIT,
                       combine(combine(combine(combine(combine(datasets::SmallShapes(), ArithmeticAdditionQSYMM16Dataset),
                                                               framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE })),
                                                       framework::dataset::make("Src0QInfo", { QuantizationInfo(1.f / 32768.f, 0), QuantizationInfo(5.f / 32768.f, 0) })),
                                               framework::dataset::make("Src1QInfo", { QuantizationInfo(2.f / 32768.f, 0), QuantizationInfo(5.f / 32768.f, 0) })),
                                       framework::dataset::make("OutQInfo", { QuantizationInfo(5.f / 32768.f, 0) })))
{
    // Validate output
    validate(Accessor(_target), _reference, tolerance_quant);
}
TEST_SUITE_END() // QSYMM16
TEST_SUITE_END() // Quantized

TEST_SUITE_END() // ArithmeticAddition
TEST_SUITE_END() // NEON
} // namespace validation
} // namespace test
} // namespace arm_compute