Compute Library
 22.05
ArithmeticSubtraction.cpp
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2017-2021 Arm Limited.
3  *
4  * SPDX-License-Identifier: MIT
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a copy
7  * of this software and associated documentation files (the "Software"), to
8  * deal in the Software without restriction, including without limitation the
9  * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10  * sell copies of the Software, and to permit persons to whom the Software is
11  * furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in all
14  * copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22  * SOFTWARE.
23  */
24 #include "arm_compute/core/Types.h"
28 #include "tests/NEON/Accessor.h"
30 #include "tests/datasets/ConvertPolicyDataset.h"
31 #include "tests/datasets/ShapeDatasets.h"
33 #include "tests/framework/Macros.h"
36 #include "tests/validation/fixtures/ArithmeticOperationsFixture.h"
37 
38 namespace arm_compute
39 {
40 namespace test
41 {
42 namespace validation
43 {
44 namespace
45 {
46 #ifdef __aarch64__
47 constexpr AbsoluteTolerance<float> tolerance_qasymm8(0); /**< Tolerance value for comparing reference's output against implementation's output for quantized data types */
48 #else //__aarch64__
49 constexpr AbsoluteTolerance<float> tolerance_qasymm8(1); /**< Tolerance value for comparing reference's output against implementation's output for quantized data types */
50 #endif //__aarch64__
51 constexpr AbsoluteTolerance<int16_t> tolerance_qsymm16(1); /**< Tolerance value for comparing reference's output against implementation's output for quantized data types */
52 
53 // Quantization Infomation DataSet
54 const auto ArithmeticSubtractionQuantizationInfoDataset = combine(combine(framework::dataset::make("QuantizationInfoIn1", { QuantizationInfo(10, 120) }),
55  framework::dataset::make("QuantizationInfoIn2", { QuantizationInfo(20, 110) })),
56  framework::dataset::make("QuantizationInfoOut", { QuantizationInfo(15, 125) }));
57 const auto ArithmeticSubtractionQuantizationInfoSignedDataset = combine(combine(framework::dataset::make("QuantizationInfoIn1", { QuantizationInfo(0.5f, 10) }),
58  framework::dataset::make("QuantizationInfoIn2", { QuantizationInfo(0.5f, 20) })),
59  framework::dataset::make("QuantizationInfoOut", { QuantizationInfo(0.5f, 50) }));
60 const auto ArithmeticSubtractionQuantizationInfoSignedInPlaceDataset = combine(combine(framework::dataset::make("QuantizationInfoIn1", { QuantizationInfo(0.8f, 10) }),
61  framework::dataset::make("QuantizationInfoIn2", { QuantizationInfo(0.8f, 10) })),
62  framework::dataset::make("QuantizationInfoOut", { QuantizationInfo(0.8f, 10) }));
63 const auto ArithmeticSubtractionQuantizationInfoSymmetric = combine(combine(framework::dataset::make("QuantizationInfoIn1", { QuantizationInfo(0.3f, 0) }),
64  framework::dataset::make("QuantizationInfoIn2", { QuantizationInfo(0.7f, 0) })),
65  framework::dataset::make("QuantizationInfoOut", { QuantizationInfo(0.2f, 0) }));
66 const auto InPlaceDataSet = framework::dataset::make("InPlace", { false, true });
67 const auto OutOfPlaceDataSet = framework::dataset::make("InPlace", { false });
68 } // namespace
69 
70 TEST_SUITE(NEON)
71 TEST_SUITE(ArithmeticSubtraction)
72 
73 template <typename T>
74 using NEArithmeticSubtractionFixture = ArithmeticSubtractionValidationFixture<Tensor, Accessor, NEArithmeticSubtraction, T>;
75 
76 // *INDENT-OFF*
77 // clang-format off
79  framework::dataset::make("Input1Info", { TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32),
80  TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8), // Invalid data type combination
81  TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), // Mismatching shapes
82  TensorInfo(TensorShape(48U, 11U, 2U), 1, DataType::QASYMM8), // Mismatching types
83  TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QASYMM8), // Invalid convert policy
84  }),
85  framework::dataset::make("Input2Info",{ TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32),
86  TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::S16),
87  TensorInfo(TensorShape(48U, 11U, 2U), 1, DataType::F32),
88  TensorInfo(TensorShape(48U, 11U, 2U), 1, DataType::F32),
90  })),
91  framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32),
92  TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8),
93  TensorInfo(TensorShape(48U, 11U, 2U), 1, DataType::F32),
96  })),
102  })),
103  framework::dataset::make("Expected", { true, false, false, false, false})),
105 {
106  ARM_COMPUTE_EXPECT(bool(NEArithmeticSubtraction::validate(&input1_info.clone()->set_is_resizable(false), &input2_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false), policy)) == expected, framework::LogLevel::ERRORS);
107 }
108 // clang-format on
109 // *INDENT-ON*
110 
111 TEST_SUITE(InPlaceValidate)
113 {
114  const auto random_shape = TensorShape{ 9, 9 };
115  const auto single_tensor_info = TensorInfo{ random_shape, 1, DataType::F32 };
116 
117  Status result = NEArithmeticSubtraction::validate(&single_tensor_info, &single_tensor_info, &single_tensor_info, ConvertPolicy::WRAP);
118  ARM_COMPUTE_EXPECT(bool(result) == true, framework::LogLevel::ERRORS);
119 }
120 
121 TEST_CASE(ValidBroadCast, framework::DatasetMode::ALL)
122 {
123  const auto larger_shape = TensorShape{ 27U, 13U, 2U };
124  const auto smaller_shape = TensorShape{ 1U, 13U, 2U };
125 
126  const auto larger_tensor_info = TensorInfo{ larger_shape, 1, DataType::F32 };
127  const auto smaller_tensor_info = TensorInfo{ smaller_shape, 1, DataType::F32 };
128 
129  Status result = NEArithmeticSubtraction::validate(&larger_tensor_info, &smaller_tensor_info, &larger_tensor_info, ConvertPolicy::WRAP);
130  ARM_COMPUTE_EXPECT(bool(result) == true, framework::LogLevel::ERRORS);
131 }
132 
133 TEST_CASE(InvalidBroadcastOutput, framework::DatasetMode::ALL)
134 {
135  const auto larger_shape = TensorShape{ 27U, 13U, 2U };
136  const auto smaller_shape = TensorShape{ 1U, 13U, 2U };
137 
138  const auto larger_tensor_info = TensorInfo{ larger_shape, 1, DataType::F32 };
139  const auto smaller_tensor_info = TensorInfo{ smaller_shape, 1, DataType::F32 };
140 
141  Status result = NEArithmeticSubtraction::validate(&larger_tensor_info, &smaller_tensor_info, &smaller_tensor_info, ConvertPolicy::WRAP);
142  ARM_COMPUTE_EXPECT(bool(result) == false, framework::LogLevel::ERRORS);
143 }
144 
145 TEST_CASE(InvalidBroadcastBoth, framework::DatasetMode::ALL)
146 {
147  const auto shape0 = TensorShape{ 9U, 9U };
148  const auto shape1 = TensorShape{ 9U, 1U, 2U };
149 
150  const auto info0 = TensorInfo{ shape0, 1, DataType::F32 };
151  const auto info1 = TensorInfo{ shape1, 1, DataType::F32 };
152 
153  Status result{};
154 
155  result = NEArithmeticSubtraction::validate(&info0, &info1, &info0, ConvertPolicy::WRAP);
156  ARM_COMPUTE_EXPECT(bool(result) == false, framework::LogLevel::ERRORS);
157 
158  result = NEArithmeticSubtraction::validate(&info0, &info1, &info1, ConvertPolicy::WRAP);
159  ARM_COMPUTE_EXPECT(bool(result) == false, framework::LogLevel::ERRORS);
160 }
161 TEST_SUITE_END() // InPlaceValidate
162 
163 TEST_SUITE(U8)
164 FIXTURE_DATA_TEST_CASE(RunSmall, NEArithmeticSubtractionFixture<uint8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallShapes(), framework::dataset::make("DataType",
165  DataType::U8)),
166  framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
167  OutOfPlaceDataSet))
168 {
169  // Validate output
170  validate(Accessor(_target), _reference);
171 }
172 TEST_SUITE_END() // U8
173 
174 using NEArithmeticSubtractionQASYMM8Fixture = ArithmeticSubtractionValidationQuantizedFixture<Tensor, Accessor, NEArithmeticSubtraction, uint8_t>;
175 using NEArithmeticSubtractionQASYMM8SignedFixture = ArithmeticSubtractionValidationQuantizedFixture<Tensor, Accessor, NEArithmeticSubtraction, int8_t>;
176 using NEArithmeticSubtractionQASYMM8SignedBroadcastFixture = ArithmeticSubtractionValidationQuantizedBroadcastFixture<Tensor, Accessor, NEArithmeticSubtraction, int8_t>;
177 using NEArithmeticSubtractionQSYMM16Fixture = ArithmeticSubtractionValidationQuantizedFixture<Tensor, Accessor, NEArithmeticSubtraction, int16_t>;
178 
179 TEST_SUITE(Quantized)
180 TEST_SUITE(QASYMM8)
181 FIXTURE_DATA_TEST_CASE(RunSmall, NEArithmeticSubtractionQASYMM8Fixture, framework::DatasetMode::ALL, combine(combine(combine(combine(datasets::SmallShapes(), framework::dataset::make("DataType",
182  DataType::QASYMM8)),
183  framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE })),
184  ArithmeticSubtractionQuantizationInfoDataset),
185  OutOfPlaceDataSet))
186 {
187  // Validate output
188  validate(Accessor(_target), _reference, tolerance_qasymm8);
189 }
190 TEST_SUITE_END() // QASYMM8
191 
194  datasets::SmallShapes(), framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)),
195  framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE })),
196  ArithmeticSubtractionQuantizationInfoSignedDataset),
197  OutOfPlaceDataSet))
198 {
199  // Validate output
200  validate(Accessor(_target), _reference, tolerance_qasymm8);
201 }
203  datasets::SmallShapesBroadcast(),
205  framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE })),
206  ArithmeticSubtractionQuantizationInfoSignedDataset),
207  OutOfPlaceDataSet))
208 {
209  // Validate output
210  validate(Accessor(_target), _reference, tolerance_qasymm8);
211 }
213  datasets::TinyShapesBroadcastInplace(),
215  framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE })),
216  ArithmeticSubtractionQuantizationInfoSignedInPlaceDataset),
217  InPlaceDataSet))
218 {
219  // Validate output
220  validate(Accessor(_target), _reference, tolerance_qasymm8);
221 }
222 TEST_SUITE_END() // QASYMM8_SIGNED
223 
226  datasets::SmallShapes(),
227  framework::dataset::make("DataType", DataType::QSYMM16)),
228  framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE })),
229  ArithmeticSubtractionQuantizationInfoSymmetric),
230  OutOfPlaceDataSet))
231 {
232  // Validate output
233  validate(Accessor(_target), _reference, tolerance_qsymm16);
234 }
235 TEST_SUITE_END() // QSYMM16
236 TEST_SUITE_END() // Quantized
237 
239 FIXTURE_DATA_TEST_CASE(RunSmall, NEArithmeticSubtractionFixture<int16_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallShapes(), framework::dataset::make("DataType",
240  DataType::S16)),
241  framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
242  OutOfPlaceDataSet))
243 {
244  // Validate output
245  validate(Accessor(_target), _reference);
246 }
247 
249  DataType::S16)),
251  OutOfPlaceDataSet))
252 {
253  // Validate output
254  validate(Accessor(_target), _reference);
255 }
256 TEST_SUITE_END() // S16
257 
259 FIXTURE_DATA_TEST_CASE(RunSmall, NEArithmeticSubtractionFixture<int32_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallShapes(), framework::dataset::make("DataType",
260  DataType::S32)),
261  framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
262  OutOfPlaceDataSet))
263 {
264  // Validate output
265  validate(Accessor(_target), _reference);
266 }
267 
269  DataType::S32)),
271  OutOfPlaceDataSet))
272 {
273  // Validate output
274  validate(Accessor(_target), _reference);
275 }
276 TEST_SUITE_END() // S32
277 
278 TEST_SUITE(Float)
279 #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
282  DataType::F16)),
284  OutOfPlaceDataSet))
285 {
286  // Validate output
287  validate(Accessor(_target), _reference);
288 }
289 TEST_SUITE_END() // F16
290 #endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
291 
293 FIXTURE_DATA_TEST_CASE(RunSmall, NEArithmeticSubtractionFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallShapes(), framework::dataset::make("DataType",
294  DataType::F32)),
295  framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
296  InPlaceDataSet))
297 {
298  // Validate output
299  validate(Accessor(_target), _reference);
300 }
301 
303  DataType::F32)),
305  OutOfPlaceDataSet))
306 {
307  // Validate output
308  validate(Accessor(_target), _reference);
309 }
310 
311 template <typename T>
312 using NEArithmeticSubtractionBroadcastFixture = ArithmeticSubtractionBroadcastValidationFixture<Tensor, Accessor, NEArithmeticSubtraction, T>;
313 
317  OutOfPlaceDataSet))
318 {
319  // Validate output
320  validate(Accessor(_target), _reference);
321 }
322 
326  OutOfPlaceDataSet))
327 {
328  // Validate output
329  validate(Accessor(_target), _reference);
330 }
331 TEST_SUITE_END() // F32
332 TEST_SUITE_END() // Float
333 
334 TEST_SUITE_END() // ArithmeticSubtraction
335 TEST_SUITE_END() // Neon
336 } // namespace validation
337 } // namespace test
338 } // namespace arm_compute
Shape of a tensor.
Definition: TensorShape.h:39
quantized, symmetric fixed-point 16-bit number
1 channel, 1 U8 per channel
1 channel, 1 F32 per channel
ARM_COMPUTE_EXPECT(has_error==expected, framework::LogLevel::ERRORS)
std::enable_if< is_container< T >::value, ContainerDataset< T > >::type make(std::string name, T &&values)
Helper function to create a ContainerDataset.
ArithmeticSubtractionBroadcastValidationFixture< Tensor, Accessor, NEArithmeticSubtraction, T > NEArithmeticSubtractionBroadcastFixture
Status class.
Definition: Error.h:52
Copyright (c) 2017-2022 Arm Limited.
1 channel, 1 F16 per channel
1 channel, 1 S32 per channel
DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(framework::dataset::make("InputInfo", { TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QASYMM8), TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QASYMM8), TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QSYMM16), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QSYMM16), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QSYMM16), }), framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F16), TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QASYMM8), TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QASYMM8), TensorInfo(TensorShape(30U, 11U, 2U), 1, DataType::F32), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QSYMM16, QuantizationInfo(1.f/32768.f, 0)), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QSYMM16, QuantizationInfo(1.f/32768.f, 0)), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QSYMM16, QuantizationInfo(1.f/32768.f, 0)), })), framework::dataset::make("ActivationInfo", { ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::TANH), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::TANH), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LOGISTIC), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::SQRT), })), 
framework::dataset::make("Expected", { false, true, true, true, false, false, true, true, false })), input_info, output_info, act_info, expected)
Accessor implementation for Tensor objects.
Definition: Accessor.h:35
DatasetMode
Possible dataset modes.
Definition: DatasetModes.h:40
TEST_SUITE_END() FIXTURE_DATA_TEST_CASE(RunSmall
[CLActivationLayer Test snippet]
quantized, asymmetric fixed-point 8-bit number unsigned
validate(CLAccessor(output_state), expected_output)
1 channel, 1 S16 per channel
ArithmeticSubtractionValidationQuantizedFixture< Tensor, Accessor, NEArithmeticSubtraction, int8_t > NEArithmeticSubtractionQASYMM8SignedFixture
FIXTURE_DATA_TEST_CASE(RunSmall, CLAbsLayerFixture< half >, framework::DatasetMode::PRECOMMIT, combine(datasets::SmallShapes(), framework::dataset::make("DataType", DataType::F16)))
Definition: AbsLayer.cpp:50
Store the tensor's metadata.
Definition: TensorInfo.h:43
ArithmeticSubtractionValidationQuantizedFixture< Tensor, Accessor, NEArithmeticSubtraction, int16_t > NEArithmeticSubtractionQSYMM16Fixture
TEST_CASE(FusedActivation, framework::DatasetMode::ALL)
Validate fused activation expecting the following behaviours:
quantized, asymmetric fixed-point 8-bit number signed
zip(zip(framework::dataset::make("Weights", { TensorInfo(TensorShape(32U, 13U, 2U, 2U), 1, DataType::F32), TensorInfo(TensorShape(32U, 13U, 2U, 2U), 1, DataType::F32), TensorInfo(TensorShape(32U, 13U, 2U, 1U), 1, DataType::F32), }), framework::dataset::make("MVBGInfo",{ TensorInfo(TensorShape(2U), 1, DataType::F32), TensorInfo(TensorShape(2U), 1, DataType::F16), TensorInfo(TensorShape(5U), 1, DataType::F32), })), framework::dataset::make("Expected", { true, false, false}))
TEST_SUITE(QASYMM8_to_F32) FIXTURE_DATA_TEST_CASE(RunSmall
DataType
Available data types.
Definition: Types.h:79
ArithmeticSubtractionValidationQuantizedBroadcastFixture< Tensor, Accessor, NEArithmeticSubtraction, int8_t > NEArithmeticSubtractionQASYMM8SignedBroadcastFixture
combine(datasets::SmallShapes(), framework::dataset::make("DataType", DataType::F32)))
Definition: AbsLayer.cpp:65
ConvertPolicy
Policy to handle integer overflow.
Definition: Types.h:404
static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, ConvertPolicy policy, const ActivationLayerInfo &act_info=ActivationLayerInfo())
Static function to check if given info will lead to a valid configuration of NEArithmeticSubtraction...
ArithmeticSubtractionValidationFixture< Tensor, Accessor, NEArithmeticSubtraction, T > NEArithmeticSubtractionFixture