Compute Library 23.08
GenerateProposalsLayer.cpp
/*
 * Copyright (c) 2019-2021 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/runtime/NEON/NEScheduler.h"
#include "arm_compute/runtime/NEON/functions/NEGenerateProposalsLayer.h"
#include "arm_compute/runtime/NEON/functions/NEPermute.h"
#include "arm_compute/runtime/NEON/functions/NESlice.h"
#include "src/core/NEON/kernels/NEGenerateProposalsLayerKernel.h"
#include "tests/Globals.h"
#include "tests/NEON/Accessor.h"
#include "tests/NEON/ArrayAccessor.h"
#include "tests/NEON/Helper.h"
#include "tests/framework/Macros.h"
#include "tests/framework/datasets/Datasets.h"
#include "tests/validation/Validation.h"
#include "tests/validation/fixtures/ComputeAllAnchorsFixture.h"
#include "utils/TypePrinter.h"

namespace arm_compute
{
namespace test
{
namespace validation
{
namespace
{
using NEComputeAllAnchors = NESynthetizeFunction<NEComputeAllAnchorsKernel>;
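// NESynthetizeFunction (from tests/NEON/Helper.h) wraps the bare NEComputeAllAnchorsKernel so it
// can be configured and run like a regular runtime function in the tests below.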

template <typename U, typename T>
inline void fill_tensor(U &&tensor, const std::vector<T> &v)
{
    std::memcpy(tensor.data(), v.data(), sizeof(T) * v.size());
}

template <typename T>
inline void fill_tensor(Accessor &&tensor, const std::vector<T> &v)
{
    if(tensor.data_layout() == DataLayout::NCHW)
    {
        std::memcpy(tensor.data(), v.data(), sizeof(T) * v.size());
    }
    else
    {
        const int channels = tensor.shape()[0];
        const int width    = tensor.shape()[1];
        const int height   = tensor.shape()[2];
        for(int x = 0; x < width; ++x)
        {
            for(int y = 0; y < height; ++y)
            {
                for(int c = 0; c < channels; ++c)
                {
                    *(reinterpret_cast<T *>(tensor(Coordinates(c, x, y)))) = *(reinterpret_cast<const T *>(v.data() + x + y * width + c * height * width));
                }
            }
        }
    }
}
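// The reference vectors in this file are laid out in NCHW order; for an NHWC tensor the overload
// above scatters element (c, x, y) from its NCHW linear index x + y * width + c * width * height,
// so the same reference data can be reused for both layouts.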

const auto ComputeAllInfoDataset = framework::dataset::make("ComputeAllInfo",
{
    ComputeAnchorsInfo(10U, 10U, 1. / 16.f),
    ComputeAnchorsInfo(100U, 1U, 1. / 2.f),
    ComputeAnchorsInfo(100U, 1U, 1. / 4.f),
    ComputeAnchorsInfo(100U, 100U, 1. / 4.f),
});

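// Absolute tolerance used further down when validating the quantized (QSYMM16) ComputeAllAnchors output.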
constexpr AbsoluteTolerance<int16_t> tolerance_qsymm16(1);
} // namespace

TEST_SUITE(NEON)
TEST_SUITE(GenerateProposals)

// *INDENT-OFF*
// clang-format off
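// Each position in the zipped datasets below describes one configuration of tensor infos together
// with the result NEGenerateProposalsLayer::validate() is expected to return for it; only the
// first configuration is valid.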
DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(zip(zip(
    framework::dataset::make("scores", { TensorInfo(TensorShape(100U, 100U, 9U), 1, DataType::F32),
                                         TensorInfo(TensorShape(100U, 100U, 9U), 1, DataType::F16),   // Mismatching types
                                         TensorInfo(TensorShape(100U, 100U, 9U), 1, DataType::F16),   // Wrong deltas (number of transformation values not a multiple of 4)
                                         TensorInfo(TensorShape(100U, 100U, 9U), 1, DataType::F16),   // Wrong anchors (number of values per anchor != 4)
                                         TensorInfo(TensorShape(100U, 100U, 9U), 1, DataType::F16),   // Output tensor num_valid_proposals not scalar
                                         TensorInfo(TensorShape(100U, 100U, 9U), 1, DataType::F16)}), // num_valid_proposals not U32
    framework::dataset::make("deltas", { TensorInfo(TensorShape(100U, 100U, 36U), 1, DataType::F32),
                                         TensorInfo(TensorShape(100U, 100U, 36U), 1, DataType::F32),
                                         TensorInfo(TensorShape(100U, 100U, 38U), 1, DataType::F32),
                                         TensorInfo(TensorShape(100U, 100U, 38U), 1, DataType::F32),
                                         TensorInfo(TensorShape(100U, 100U, 38U), 1, DataType::F32),
                                         TensorInfo(TensorShape(100U, 100U, 38U), 1, DataType::F32)})),
    framework::dataset::make("anchors", { TensorInfo(TensorShape(4U, 9U), 1, DataType::F32),
                                          TensorInfo(TensorShape(4U, 9U), 1, DataType::F32),
                                          TensorInfo(TensorShape(4U, 9U), 1, DataType::F32),
                                          TensorInfo(TensorShape(5U, 9U), 1, DataType::F32),
                                          TensorInfo(TensorShape(4U, 9U), 1, DataType::F32),
                                          TensorInfo(TensorShape(4U, 9U), 1, DataType::F32)})),
    framework::dataset::make("proposals", { TensorInfo(TensorShape(5U, 100U*100U*9U), 1, DataType::F32),
                                            TensorInfo(TensorShape(5U, 100U*100U*9U), 1, DataType::F32),
                                            TensorInfo(TensorShape(5U, 100U*100U*9U), 1, DataType::F32),
                                            TensorInfo(TensorShape(5U, 100U*100U*9U), 1, DataType::F32),
                                            TensorInfo(TensorShape(5U, 100U*100U*9U), 1, DataType::F32),
                                            TensorInfo(TensorShape(5U, 100U*100U*9U), 1, DataType::F32)})),
    framework::dataset::make("scores_out", { TensorInfo(TensorShape(100U*100U*9U), 1, DataType::F32),
                                             TensorInfo(TensorShape(100U*100U*9U), 1, DataType::F32),
                                             TensorInfo(TensorShape(100U*100U*9U), 1, DataType::F32),
                                             TensorInfo(TensorShape(100U*100U*9U), 1, DataType::F32),
                                             TensorInfo(TensorShape(100U*100U*9U), 1, DataType::F32),
                                             TensorInfo(TensorShape(100U*100U*9U), 1, DataType::F32)})),
    framework::dataset::make("num_valid_proposals", { TensorInfo(TensorShape(1U, 1U), 1, DataType::U32),
                                                      TensorInfo(TensorShape(1U, 1U), 1, DataType::U32),
                                                      TensorInfo(TensorShape(1U, 1U), 1, DataType::U32),
                                                      TensorInfo(TensorShape(1U, 1U), 1, DataType::U32),
                                                      TensorInfo(TensorShape(1U, 10U), 1, DataType::U32),
                                                      TensorInfo(TensorShape(1U, 1U), 1, DataType::F16)})),
    framework::dataset::make("generate_proposals_info", { GenerateProposalsInfo(10.f, 10.f, 1.f),
                                                          GenerateProposalsInfo(10.f, 10.f, 1.f),
                                                          GenerateProposalsInfo(10.f, 10.f, 1.f),
                                                          GenerateProposalsInfo(10.f, 10.f, 1.f),
                                                          GenerateProposalsInfo(10.f, 10.f, 1.f),
                                                          GenerateProposalsInfo(10.f, 10.f, 1.f)})),
    framework::dataset::make("Expected", { true, false, false, false, false, false })),
    scores, deltas, anchors, proposals, scores_out, num_valid_proposals, generate_proposals_info, expected)
{
    ARM_COMPUTE_EXPECT(bool(NEGenerateProposalsLayer::validate(&scores.clone()->set_is_resizable(true),
                                                               &deltas.clone()->set_is_resizable(true),
                                                               &anchors.clone()->set_is_resizable(true),
                                                               &proposals.clone()->set_is_resizable(true),
                                                               &scores_out.clone()->set_is_resizable(true),
                                                               &num_valid_proposals.clone()->set_is_resizable(true),
                                                               generate_proposals_info)) == expected, framework::LogLevel::ERRORS);
}
// clang-format on
// *INDENT-ON*

template <typename T>
using NEComputeAllAnchorsFixture = ComputeAllAnchorsFixture<Tensor, Accessor, NEComputeAllAnchors, T>;

TEST_SUITE(Float)
TEST_SUITE(FP32)
DATA_TEST_CASE(IntegrationTestCaseAllAnchors, framework::DatasetMode::ALL, framework::dataset::make("DataType", { DataType::F32 }),
               data_type)
{
    const int values_per_roi = 4;
    const int num_anchors    = 3;
    const int feature_height = 4;
    const int feature_width  = 3;

    SimpleTensor<float> anchors_expected(TensorShape(values_per_roi, feature_width * feature_height * num_anchors), DataType::F32);
    fill_tensor(anchors_expected, std::vector<float> { -26, -19, 87, 86,
                                                       -81, -27, 58, 63,
                                                       -44, -15, 55, 36,
                                                       -10, -19, 103, 86,
                                                       -65, -27, 74, 63,
                                                       -28, -15, 71, 36,
                                                       6, -19, 119, 86,
                                                       -49, -27, 90, 63,
                                                       -12, -15, 87, 36,
                                                       -26, -3, 87, 102,
                                                       -81, -11, 58, 79,
                                                       -44, 1, 55, 52,
                                                       -10, -3, 103, 102,
                                                       -65, -11, 74, 79,
                                                       -28, 1, 71, 52,
                                                       6, -3, 119, 102,
                                                       -49, -11, 90, 79,
                                                       -12, 1, 87, 52,
                                                       -26, 13, 87, 118,
                                                       -81, 5, 58, 95,
                                                       -44, 17, 55, 68,
                                                       -10, 13, 103, 118,
                                                       -65, 5, 74, 95,
                                                       -28, 17, 71, 68,
                                                       6, 13, 119, 118,
                                                       -49, 5, 90, 95,
                                                       -12, 17, 87, 68,
                                                       -26, 29, 87, 134,
                                                       -81, 21, 58, 111,
                                                       -44, 33, 55, 84,
                                                       -10, 29, 103, 134,
                                                       -65, 21, 74, 111,
                                                       -28, 33, 71, 84,
                                                       6, 29, 119, 134,
                                                       -49, 21, 90, 111,
                                                       -12, 33, 87, 84
                                                     });
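
    // The expected values above follow from the three base anchors filled in further down: the
    // anchors for the feature-map cell at (x, y) are the base anchors shifted by
    // (x * stride, y * stride, x * stride, y * stride), with stride = 16, i.e. the inverse of the
    // 1/16 spatial scale passed to ComputeAnchorsInfo. For example, row 4, (-10, -19, 103, 86), is
    // the first base anchor (-26, -19, 87, 86) shifted by 16 along x.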

    Tensor all_anchors;
    Tensor anchors = create_tensor<Tensor>(TensorShape(4, num_anchors), data_type);

    // Create and configure function
    NEComputeAllAnchors compute_anchors;
    compute_anchors.configure(&anchors, &all_anchors, ComputeAnchorsInfo(feature_width, feature_height, 1. / 16.0));
    anchors.allocator()->allocate();
    all_anchors.allocator()->allocate();

    fill_tensor(Accessor(anchors), std::vector<float> { -26, -19, 87, 86,
                                                        -81, -27, 58, 63,
                                                        -44, -15, 55, 36
                                                      });
    // Compute function
    compute_anchors.run();
    validate(Accessor(all_anchors), anchors_expected);
}

DATA_TEST_CASE(IntegrationTestCaseGenerateProposals, framework::DatasetMode::ALL, combine(framework::dataset::make("DataType", { DataType::F32 }),
                                                                                          framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
               data_type, data_layout)
{
    const int values_per_roi = 4;
    const int num_anchors    = 2;
    const int feature_height = 4;
    const int feature_width  = 5;

    std::vector<float> scores_vector
    {
        5.055894435664012e-04f, 1.270304909820112e-03f, 2.492271113912067e-03f, 5.951663827809190e-03f,
        7.846917156877404e-03f, 6.776275276294789e-03f, 6.761571012891965e-03f, 4.898292096237725e-03f,
        6.044472332578605e-04f, 3.203334118759474e-03f, 2.947527908919908e-03f, 6.313238560015770e-03f,
        7.931767757095738e-03f, 8.764345805102866e-03f, 7.325012199914913e-03f, 4.317069470446271e-03f,
        2.372537409795522e-03f, 1.589227460352735e-03f, 7.419477503600818e-03f, 3.157690354133824e-05f,
        1.125915135986472e-03f, 9.865363483872330e-03f, 2.429454743386769e-03f, 2.724460564167563e-03f,
        7.670409838207963e-03f, 5.558891552328172e-03f, 7.876904873099614e-03f, 6.824746047239291e-03f,
        7.023817548067892e-03f, 3.651314909238673e-04f, 6.720443709032501e-03f, 5.935615511606155e-03f,
        2.837349642759774e-03f, 1.787235113610299e-03f, 4.538568889918262e-03f, 3.391510678188818e-03f,
        7.328474239481874e-03f, 6.306967923936016e-03f, 8.102218904895860e-04f, 3.366646521610209e-03f
    };

    std::vector<float> bbx_vector
    {
        5.066650471856862e-03, -7.638671742936328e-03, 2.549596503988635e-03, -8.316416756423296e-03,
        -2.397471917924575e-04, 7.370595187754891e-03, -2.771880178185262e-03, 3.958364873973579e-03,
        4.493661094712284e-03, 2.016487051533088e-03, -5.893883038142033e-03, 7.570636080807809e-03,
        -1.395511229386785e-03, 3.686686052704696e-03, -7.738166245767079e-03, -1.947306329828059e-03,
        -9.299719716045681e-03, -3.476410493413708e-03, -2.390761190919604e-03, 4.359281254364210e-03,
        -2.135251160164030e-04, 9.203299843371962e-03, 4.042322775006053e-03, -9.464271243910754e-03,
        2.566239543229305e-03, -9.691093900220627e-03, -4.019283034310979e-03, 8.145470429508792e-03,
        7.345087308315662e-04, 7.049642787384043e-03, -2.768492313674294e-03, 6.997160053405803e-03,
        6.675346697112969e-03, 2.353293365652274e-03, -3.612002585241749e-04, 1.592076522068768e-03,
        -8.354188900818149e-04, -5.232515333564140e-04, 6.946683728847089e-03, -8.469757407935994e-03,
        -8.985324496496555e-03, 4.885832859017961e-03, -7.662967577576512e-03, 7.284124004335807e-03,
        -5.812167510299458e-03, -5.760336800482398e-03, 6.040416930336549e-03, 5.861508595443691e-03,
        -5.509243096133549e-04, -2.006142470055888e-03, -7.205925340416066e-03, -1.117459082969758e-03,
        4.233247017623154e-03, 8.079257498201178e-03, 2.962639022639513e-03, 7.069474943472751e-03,
        -8.562946284971293e-03, -8.228634642768271e-03, -6.116245322799971e-04, -7.213122000180859e-03,
        1.693094399433209e-03, -4.287504459132290e-03, 8.740365683925144e-03, 3.751788160720638e-03,
        7.006764222862830e-03, 9.676754678358187e-03, -6.458757235812945e-03, -4.486506575589758e-03,
        -4.371087196816259e-03, 3.542166755953152e-03, -2.504808998699504e-03, 5.666601724512010e-03,
        -3.691862724546129e-03, 3.689809719085287e-03, 9.079930264704458e-03, 6.365127787359476e-03,
        2.881681788246101e-06, 9.991866069315165e-03, -1.104757466496565e-03, -2.668455405633477e-03,
        -1.225748887087659e-03, 6.530536159094015e-03, 3.629468917975644e-03, 1.374426066950348e-03,
        -2.404098881570632e-03, -4.791365049441602e-03, -2.970654027009094e-03, 7.807553690294366e-03,
        -1.198321129505323e-03, -3.574885336949881e-03, -5.380848303732298e-03, 9.705151282165116e-03,
        -1.005217683242201e-03, 9.178094036278405e-03, -5.615977269541644e-03, 5.333533158509859e-03,
        -2.817116206168516e-03, 6.672609782000503e-03, 6.575769501651313e-03, 8.987596634989362e-03,
        -1.283530791296188e-03, 1.687717120057778e-03, 3.242391851439037e-03, -7.312060454341677e-03,
        4.735335326324270e-03, -6.832367028817463e-03, -5.414854835884652e-03, -9.352380213755996e-03,
        -3.682662043703889e-03, -6.127508590419776e-04, -7.682256596819467e-03, 9.569532628790246e-03,
        -1.572157284518933e-03, -6.023034366859191e-03, -5.110873282582924e-03, -8.697072236660256e-03,
        -3.235150419663566e-03, -8.286320236471386e-03, -5.229472409112913e-03, 9.920785896115053e-03,
        -2.478413362126123e-03, -9.261324796935007e-03, 1.718512310840434e-04, 3.015875488208480e-03,
        -6.172932549255669e-03, -4.031715551985103e-03, -9.263878005853677e-03, -2.815310738453385e-03,
        7.075307462133643e-03, 1.404611747938669e-03, -1.518548732533266e-03, -9.293430941655778e-03,
        6.382186966633246e-03, 8.256835789169248e-03, 3.196907843506736e-03, 8.821615689753433e-03,
        -7.661543424832439e-03, 1.636273081822326e-03, -8.792373335756125e-03, 2.958775812049877e-03,
        -6.269300278071262e-03, 6.248285790856450e-03, -3.675414624536002e-03, -1.692616700318762e-03,
        4.126007647815893e-03, -9.155291689759584e-03, -8.432616039924004e-03, 4.899980636213323e-03,
        3.511535019681671e-03, -1.582745757177339e-03, -2.703657774917963e-03, 6.738168990840388e-03,
        4.300455303937919e-03, 9.618312854781494e-03, 2.762142918402472e-03, -6.590025003382154e-03,
        -2.071168373801788e-03, 8.613893943683627e-03, 9.411190295341036e-03, -6.129018930548372e-03
    };

    const std::vector<float> anchors_vector{ -26, -19, 87, 86, -81, -27, 58, 63 };
    SimpleTensor<float> proposals_expected(TensorShape(5, 9), DataType::F32);
    fill_tensor(proposals_expected, std::vector<float>
    {
        0, 0, 0, 75.269, 64.4388,
        0, 21.9579, 13.0535, 119, 99,
        0, 38.303, 0, 119, 87.6447,
        0, 0, 0, 119, 64.619,
        0, 0, 20.7997, 74.0714, 99,
        0, 0, 0, 91.8963, 79.3724,
        0, 0, 4.42377, 58.1405, 95.1781,
        0, 0, 13.4405, 104.799, 99,
        0, 38.9066, 28.2434, 119, 99,
    });

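    // The reference scores below are the objectness scores of the proposals that survive sorting
    // and non-maximum suppression, highest first.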
    SimpleTensor<float> scores_expected(TensorShape(9), DataType::F32);
    fill_tensor(scores_expected, std::vector<float>
    {
        0.00986536,
        0.00876435,
        0.00784692,
        0.00767041,
        0.00732847,
        0.00682475,
        0.00672044,
        0.00631324,
        3.15769e-05
    });

    TensorShape scores_shape = TensorShape(feature_width, feature_height, num_anchors);
    TensorShape deltas_shape = TensorShape(feature_width, feature_height, values_per_roi * num_anchors);
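    // The shapes above are written in NCHW order (W, H, C). For NHWC the channel dimension is
    // stored innermost, so they are permuted to (C, W, H) before the tensors are created.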
    if(data_layout == DataLayout::NHWC)
    {
        permute(scores_shape, PermutationVector(2U, 0U, 1U));
        permute(deltas_shape, PermutationVector(2U, 0U, 1U));
    }
    // Inputs
    Tensor scores      = create_tensor<Tensor>(scores_shape, data_type, 1, QuantizationInfo(), data_layout);
    Tensor bbox_deltas = create_tensor<Tensor>(deltas_shape, data_type, 1, QuantizationInfo(), data_layout);
    Tensor anchors     = create_tensor<Tensor>(TensorShape(values_per_roi, num_anchors), data_type);

    // Outputs
    Tensor proposals;
    Tensor num_valid_proposals;
    Tensor scores_out;
    num_valid_proposals.allocator()->init(TensorInfo(TensorShape(1), 1, DataType::U32));

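    // GenerateProposalsInfo below is assumed to take, in order: image width (120), image height
    // (100), image scale (0.166667), the spatial scale of the feature map (1/16), the pre-NMS and
    // post-NMS proposal counts (6000, 300), the NMS threshold (0.7) and the minimum box size (16);
    // see the GenerateProposalsInfo definition for the authoritative parameter list.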
    NEGenerateProposalsLayer generate_proposals;
    generate_proposals.configure(&scores, &bbox_deltas, &anchors, &proposals, &scores_out, &num_valid_proposals,
                                 GenerateProposalsInfo(120, 100, 0.166667f, 1 / 16.0, 6000, 300, 0.7f, 16.0f));

    // Allocate memory for input/output tensors
    scores.allocator()->allocate();
    bbox_deltas.allocator()->allocate();
    anchors.allocator()->allocate();
    proposals.allocator()->allocate();
    num_valid_proposals.allocator()->allocate();
    scores_out.allocator()->allocate();
    // Fill inputs
    fill_tensor(Accessor(scores), scores_vector);
    fill_tensor(Accessor(bbox_deltas), bbx_vector);
    fill_tensor(Accessor(anchors), anchors_vector);

    // Run operator
    generate_proposals.run();
    // Gather num_valid_proposals
    const uint32_t N = *reinterpret_cast<uint32_t *>(num_valid_proposals.ptr_to_element(Coordinates(0, 0)));

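    // Each proposal row holds values_per_roi + 1 = 5 values: the batch index followed by the box
    // corners (x1, y1, x2, y2), which is why the slice below keeps five elements along dimension 0.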
    // Select the first N entries of the proposals
    Tensor  proposals_final;
    NESlice select_proposals;
    select_proposals.configure(&proposals, &proposals_final, Coordinates(0, 0), Coordinates(values_per_roi + 1, N));

    proposals_final.allocator()->allocate();
    select_proposals.run();

    // Select the first N entries of the scores
    Tensor  scores_final;
    NESlice select_scores;
    select_scores.configure(&scores_out, &scores_final, Coordinates(0), Coordinates(N));
    scores_final.allocator()->allocate();
    select_scores.run();

    const RelativeTolerance<float> tolerance_f32(1e-5f);
    // Validate the output
    validate(Accessor(proposals_final), proposals_expected, tolerance_f32);
    validate(Accessor(scores_final), scores_expected, tolerance_f32);
}

FIXTURE_DATA_TEST_CASE(ComputeAllAnchors, NEComputeAllAnchorsFixture<float>, framework::DatasetMode::ALL,
                       combine(combine(framework::dataset::make("NumAnchors", { 2, 4, 8 }), ComputeAllInfoDataset), framework::dataset::make("DataType", { DataType::F32 })))
{
    // Validate output
    validate(Accessor(_target), _reference);
}
TEST_SUITE_END() // FP32
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
TEST_SUITE(FP16)
FIXTURE_DATA_TEST_CASE(ComputeAllAnchors, NEComputeAllAnchorsFixture<half>, framework::DatasetMode::ALL,
                       combine(combine(framework::dataset::make("NumAnchors", { 2, 4, 8 }), ComputeAllInfoDataset), framework::dataset::make("DataType", { DataType::F16 })))
{
    // Validate output
    validate(Accessor(_target), _reference);
}
TEST_SUITE_END() // FP16
#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC

TEST_SUITE_END() // Float

template <typename T>
using NEComputeAllAnchorsQuantizedFixture = ComputeAllAnchorsQuantizedFixture<Tensor, Accessor, NEComputeAllAnchors, T>;

TEST_SUITE(Quantized)
TEST_SUITE(QASYMM8)
FIXTURE_DATA_TEST_CASE(ComputeAllAnchors, NEComputeAllAnchorsQuantizedFixture<int16_t>, framework::DatasetMode::ALL,
                       combine(combine(combine(framework::dataset::make("NumAnchors", { 2, 4, 8 }), ComputeAllInfoDataset),
                                       framework::dataset::make("DataType", { DataType::QSYMM16 })),
                               framework::dataset::make("QuantInfo", { QuantizationInfo(0.125f, 0) })))
{
    // Validate output
    validate(Accessor(_target), _reference, tolerance_qsymm16);
}
TEST_SUITE_END() // QASYMM8
TEST_SUITE_END() // Quantized

TEST_SUITE_END() // GenerateProposals
TEST_SUITE_END() // Neon
} // namespace validation
} // namespace test
} // namespace arm_compute