Compute Library
 22.05
ElementwiseDivision.cpp
/*
 * Copyright (c) 2019-2021 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/NEON/functions/NEElementwiseOperations.h"
#include "arm_compute/runtime/Tensor.h"
#include "tests/NEON/Accessor.h"
#include "tests/datasets/ShapeDatasets.h"
#include "tests/framework/Asserts.h"
#include "tests/framework/Macros.h"
#include "tests/framework/datasets/Datasets.h"
#include "tests/validation/Validation.h"
#include "tests/validation/fixtures/ElementwiseOperationsFixture.h"

namespace arm_compute
{
namespace test
{
namespace validation
{
namespace
{
RelativeTolerance<float> tolerance_fp32(0.000001f);
AbsoluteTolerance<int>   tolerance_zero_s32(1); // Tolerance for S32 division
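// Note: tolerance_fp32 (and tolerance_fp16 below) are relative tolerances applied by validate()
// in the floating-point cases, alongside a 0.01 allowed-mismatch ratio (the framework's
// tolerance_number argument); tolerance_zero_s32 permits an absolute difference of 1 on the
// S32 division results.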

/** Input data sets **/
const auto ElementwiseDivisionS32Dataset = combine(combine(framework::dataset::make("DataType", DataType::S32), framework::dataset::make("DataType", DataType::S32)),
                                                   framework::dataset::make("DataType", DataType::S32));
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
RelativeTolerance<half> tolerance_fp16(static_cast<half>(0.01f));
const auto ElementwiseDivisionFP16Dataset = combine(combine(framework::dataset::make("DataType", DataType::F16), framework::dataset::make("DataType", DataType::F16)),
                                                    framework::dataset::make("DataType", DataType::F16));
#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
const auto ElementwiseDivisionFP32Dataset = combine(combine(framework::dataset::make("DataType", DataType::F32), framework::dataset::make("DataType", DataType::F32)),
                                                    framework::dataset::make("DataType", DataType::F32));
const auto InPlaceDataSet    = framework::dataset::make("InPlace", { false, true });
const auto OutOfPlaceDataSet = framework::dataset::make("InPlace", { false });
} // namespace

TEST_SUITE(NEON)
TEST_SUITE(ElementwiseDivision)

template <typename T>
using NEElementwiseDivisionFixture = ArithmeticDivisionValidationFixture<Tensor, Accessor, NEElementwiseDivision, T>;

// *INDENT-OFF*
// clang-format off
DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
               framework::dataset::make("Input1Info", { TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32),
                                                        TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32),
                                                        TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32),
                                                        TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), // Invalid data type combination
                                                        TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), // Mismatching shapes
                                                      }),
               framework::dataset::make("Input2Info",{ TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32),
                                                       TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32),
                                                       TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32),
                                                       TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::S32),
                                                       TensorInfo(TensorShape(48U, 11U, 2U), 1, DataType::F32),
                                                     })),
               framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32),
                                                       TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32),
                                                       TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32),
                                                       TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32),
                                                       TensorInfo(TensorShape(48U, 11U, 2U), 1, DataType::F32),
                                                     })),
               framework::dataset::make("Expected", { true, true, true, false, false})),
               input1_info, input2_info, output_info, expected)
{
    ARM_COMPUTE_EXPECT(bool(NEElementwiseDivision::validate(&input1_info.clone()->set_is_resizable(false), &input2_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false))) == expected, framework::LogLevel::ERRORS);
}
// clang-format on
// *INDENT-ON*
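
// A minimal sketch (not part of the original tests; the tensor shapes here are illustrative
// assumptions) of invoking the same static check directly, outside the test harness:
//
//   TensorInfo in1(TensorShape(32U, 13U, 2U), 1, DataType::F32);
//   TensorInfo in2(TensorShape(32U, 13U, 2U), 1, DataType::F32);
//   TensorInfo out(TensorShape(32U, 13U, 2U), 1, DataType::F32);
//   const Status status = NEElementwiseDivision::validate(&in1, &in2, &out);
//   // bool(status) is true for compatible shapes and data types, mirroring the "Expected" column above.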

// These test cases execute the function with dynamically-stated shapes.
// Since other elementwise operations share the same kernel, these tests are added only here.
// Also, only FP32 is tested, since the data type doesn't/shouldn't matter with dynamic shapes.
TEST_SUITE(DynamicShape)
template <typename T>
using CpuElementwiseDivisionDynamicShapeFixture = ArithmeticDivisionDynamicShapeValidationFixture<Tensor, Accessor, NEElementwiseDivision, T>;

template <typename T>
using CpuElementwiseDivisionBroadcastDynamicShapeFixture = ArithmeticDivisionBroadcastDynamicShapeValidationFixture<Tensor, Accessor, NEElementwiseDivision, T>;

TEST_SUITE(F32)

FIXTURE_DATA_TEST_CASE(RunSmall, CpuElementwiseDivisionDynamicShapeFixture<float>, framework::DatasetMode::ALL, combine(combine(datasets::SmallShapes(), ElementwiseDivisionFP32Dataset),
                                                                                                                        InPlaceDataSet))
{
    // Validate output
    validate(Accessor(_target), _reference, tolerance_fp32, 0.01);
}

FIXTURE_DATA_TEST_CASE(RunSmallBroadcast, CpuElementwiseDivisionBroadcastDynamicShapeFixture<float>, framework::DatasetMode::ALL, combine(combine(datasets::SmallShapesBroadcast(),
                                                                                                                                                  ElementwiseDivisionFP32Dataset),
                                                                                                                                          OutOfPlaceDataSet))
{
    // Validate output
    validate(Accessor(_target), _reference, tolerance_fp32, 0.01);
}

TEST_SUITE_END() // F32
TEST_SUITE_END() // DynamicShape

TEST_SUITE(Float)
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
TEST_SUITE(F16)
FIXTURE_DATA_TEST_CASE(RunSmall, NEElementwiseDivisionFixture<half>, framework::DatasetMode::ALL, combine(combine(datasets::SmallShapes(), ElementwiseDivisionFP16Dataset),
                                                                                                          InPlaceDataSet))
{
    // Validate output
    validate(Accessor(_target), _reference, tolerance_fp16, 0.01);
}
TEST_SUITE_END() // F16
#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */

TEST_SUITE(F32)
FIXTURE_DATA_TEST_CASE(RunSmall, NEElementwiseDivisionFixture<float>, framework::DatasetMode::ALL, combine(combine(datasets::SmallShapes(), ElementwiseDivisionFP32Dataset),
                                                                                                           InPlaceDataSet))
{
    // Validate output
    validate(Accessor(_target), _reference, tolerance_fp32, 0.01);
}

template <typename T>
using NEElementwiseDivisionBroadcastFixture = ArithmeticDivisionBroadcastValidationFixture<Tensor, Accessor, NEElementwiseDivision, T>;

FIXTURE_DATA_TEST_CASE(RunSmallBroadcast, NEElementwiseDivisionBroadcastFixture<float>, framework::DatasetMode::ALL, combine(combine(datasets::SmallShapesBroadcast(),
                                                                                                                             ElementwiseDivisionFP32Dataset),
                                                                                                                             OutOfPlaceDataSet))
{
    // Validate output
    validate(Accessor(_target), _reference, tolerance_fp32, 0.01);
}
FIXTURE_DATA_TEST_CASE(RunTinyBroadcastInPlace, NEElementwiseDivisionBroadcastFixture<float>, framework::DatasetMode::ALL, combine(combine(datasets::TinyShapesBroadcastInplace(),
                                                                                                                                   ElementwiseDivisionFP32Dataset),
                                                                                                                                   InPlaceDataSet))
{
    // Validate output
    validate(Accessor(_target), _reference, tolerance_fp32, 0.01);
}
TEST_SUITE_END() // F32
TEST_SUITE_END() // Float

TEST_SUITE(Integer)
TEST_SUITE(S32)
FIXTURE_DATA_TEST_CASE(RunSmall, NEElementwiseDivisionFixture<int32_t>, framework::DatasetMode::ALL, combine(combine(datasets::SmallShapes(), ElementwiseDivisionS32Dataset),
                                                                                                             InPlaceDataSet))
{
    // Validate output
    validate(Accessor(_target), _reference, tolerance_zero_s32);
}
TEST_SUITE_END() // S32
TEST_SUITE_END() // Integer

TEST_SUITE_END() // ElementwiseDivision
TEST_SUITE_END() // Neon
} // namespace validation
} // namespace test
} // namespace arm_compute
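
For readers unfamiliar with the function under test, the sketch below shows a minimal stand-alone use of NEElementwiseDivision outside the validation framework. It is an illustration only: the shapes, fill values and the main() wrapper are assumptions for this sketch, not taken from the test file.

#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/NEON/functions/NEElementwiseOperations.h"
#include "arm_compute/runtime/Tensor.h"
#include "arm_compute/runtime/TensorAllocator.h"

using namespace arm_compute;

int main()
{
    // Illustrative 2x2 F32 tensors (shape chosen for the sketch, not from the tests).
    Tensor num, den, dst;
    const TensorInfo info(TensorShape(2U, 2U), 1, DataType::F32);
    num.allocator()->init(info);
    den.allocator()->init(info);
    dst.allocator()->init(info);

    // Same static check exercised by the Validate test case above.
    if(!bool(NEElementwiseDivision::validate(num.info(), den.info(), dst.info())))
    {
        return 1;
    }

    NEElementwiseDivision div;
    div.configure(&num, &den, &dst); // element-wise dst = num / den

    num.allocator()->allocate();
    den.allocator()->allocate();
    dst.allocator()->allocate();

    // Fill the inputs through the CPU-side buffers.
    auto *n = reinterpret_cast<float *>(num.buffer());
    auto *d = reinterpret_cast<float *>(den.buffer());
    for(int i = 0; i < 4; ++i)
    {
        n[i] = static_cast<float>(i + 1); // 1, 2, 3, 4
        d[i] = 2.0f;
    }

    div.run(); // dst now holds 0.5, 1.0, 1.5, 2.0

    return 0;
}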