Compute Library 22.11
DetectionPostProcessLayer.cpp
/*
 * Copyright (c) 2019-2021 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/NEON/functions/NEDetectionPostProcessLayer.h"
#include "arm_compute/runtime/Tensor.h"
#include "arm_compute/runtime/TensorAllocator.h"
#include "tests/NEON/Accessor.h"
#include "tests/PaddingCalculator.h"
#include "tests/datasets/ShapeDatasets.h"
#include "tests/framework/Asserts.h"
#include "tests/framework/Macros.h"
#include "tests/framework/datasets/Datasets.h"
#include "tests/validation/Validation.h"

namespace arm_compute
{
namespace test
{
namespace validation
{
namespace
{
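// Copies the reference values verbatim into a tensor's backing memory (used for the F32 inputs and for the SimpleTensor references).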
template <typename U, typename T>
inline void fill_tensor(U &&tensor, const std::vector<T> &v)
{
    std::memcpy(tensor.data(), v.data(), sizeof(T) * v.size());
}
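// Quantizes the reference float values with the tensor's own QuantizationInfo before copying them in (QASYMM8 inputs).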
template <typename U, typename T>
inline void quantize_and_fill_tensor(U &&tensor, const std::vector<T> &v)
{
    QuantizationInfo     qi = tensor.quantization_info();
    std::vector<uint8_t> quantized;
    quantized.reserve(v.size());
    for(auto elem : v)
    {
        quantized.emplace_back(quantize_qasymm8(elem, qi));
    }
    std::memcpy(tensor.data(), quantized.data(), sizeof(uint8_t) * quantized.size());
}
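// Derives a QASYMM8 QuantizationInfo whose scale and offset map the real range [min, max] onto the full uint8 range.
// For example, qinfo_scaleoffset_from_minmax(-1.0f, 1.0f) yields scale = 2/255 (~0.00784) and offset = 128.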
inline QuantizationInfo qinfo_scaleoffset_from_minmax(const float min, const float max)
{
    int           offset = 0;
    float         scale  = 0;
    const uint8_t qmin   = std::numeric_limits<uint8_t>::min();
    const uint8_t qmax   = std::numeric_limits<uint8_t>::max();
    const float   f_qmin = qmin;
    const float   f_qmax = qmax;

    // Continue only if [min,max] is a valid range and not a point
    if(min != max)
    {
        scale                       = (max - min) / (f_qmax - f_qmin);
        const float offset_from_min = f_qmin - min / scale;
        const float offset_from_max = f_qmax - max / scale;

        const float offset_from_min_error = std::abs(f_qmin) + std::abs(min / scale);
        const float offset_from_max_error = std::abs(f_qmax) + std::abs(max / scale);
        const float f_offset              = offset_from_min_error < offset_from_max_error ? offset_from_min : offset_from_max;

        uint8_t uint8_offset = 0;
        if(f_offset < f_qmin)
        {
            uint8_offset = qmin;
        }
        else if(f_offset > f_qmax)
        {
            uint8_offset = qmax;
        }
        else
        {
            uint8_offset = static_cast<uint8_t>(support::cpp11::round(f_offset));
        }
        offset = uint8_offset;
    }
    return QuantizationInfo(scale, offset);
}

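// Builds a fixed six-box example, runs NEDetectionPostProcessLayer on it (F32 or QASYMM8 depending on data_type)
// and validates the four outputs against the expected reference tensors within the given tolerances.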
inline void base_test_case(DetectionPostProcessLayerInfo info, DataType data_type, const SimpleTensor<float> &expected_output_boxes,
                           const SimpleTensor<float> &expected_output_classes, const SimpleTensor<float> &expected_output_scores, const SimpleTensor<float> &expected_num_detection,
                           AbsoluteTolerance<float> tolerance_boxes = AbsoluteTolerance<float>(0.1f), AbsoluteTolerance<float> tolerance_others = AbsoluteTolerance<float>(0.1f))
{
    Tensor box_encoding     = create_tensor<Tensor>(TensorShape(4U, 6U, 1U), data_type, 1, qinfo_scaleoffset_from_minmax(-1.0f, 1.0f));
    Tensor class_prediction = create_tensor<Tensor>(TensorShape(3U, 6U, 1U), data_type, 1, qinfo_scaleoffset_from_minmax(0.0f, 1.0f));
    Tensor anchors          = create_tensor<Tensor>(TensorShape(4U, 6U), data_type, 1, qinfo_scaleoffset_from_minmax(0.0f, 100.5f));

    box_encoding.allocator()->allocate();
    class_prediction.allocator()->allocate();
    anchors.allocator()->allocate();

    std::vector<float> box_encoding_vector =
    {
        0.0f, 1.0f, 0.0f, 0.0f,
        0.0f, -1.0f, 0.0f, 0.0f,
        0.0f, 0.0f, 0.0f, 0.0f,
        0.0f, 0.0f, 0.0f, 0.0f,
        0.0f, 1.0f, 0.0f, 0.0f,
        0.0f, 0.0f, 0.0f, 0.0f
    };
    std::vector<float> class_prediction_vector =
    {
        0.0f, 0.7f, 0.68f,
        0.0f, 0.6f, 0.5f,
        0.0f, 0.9f, 0.83f,
        0.0f, 0.91f, 0.97f,
        0.0f, 0.5f, 0.4f,
        0.0f, 0.31f, 0.22f
    };
    std::vector<float> anchors_vector =
    {
        0.4f, 0.4f, 1.1f, 1.1f,
        0.4f, 0.4f, 1.1f, 1.1f,
        0.4f, 0.4f, 1.1f, 1.1f,
        0.4f, 10.4f, 1.1f, 1.1f,
        0.4f, 10.4f, 1.1f, 1.1f,
        0.4f, 100.4f, 1.1f, 1.1f
    };
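    // Hand-crafted reference inputs: six box encodings (four values per box), six rows of class scores
    // (three columns, of which the first is treated as background in the TFLite-style post-process),
    // and six anchors. The four-value rows are assumed to follow the [y, x, h, w] ordering.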

    // Fill the tensors with the pre-generated reference values
    if(data_type == DataType::F32)
    {
        fill_tensor(Accessor(box_encoding), box_encoding_vector);
        fill_tensor(Accessor(class_prediction), class_prediction_vector);
        fill_tensor(Accessor(anchors), anchors_vector);
    }
    else
    {
        quantize_and_fill_tensor(Accessor(box_encoding), box_encoding_vector);
        quantize_and_fill_tensor(Accessor(class_prediction), class_prediction_vector);
        quantize_and_fill_tensor(Accessor(anchors), anchors_vector);
    }

    // Determine the output through the Compute Library operator
    Tensor output_boxes;
    Tensor output_classes;
    Tensor output_scores;
    Tensor num_detection;
    NEDetectionPostProcessLayer detection;
    detection.configure(&box_encoding, &class_prediction, &anchors, &output_boxes, &output_classes, &output_scores, &num_detection, info);

    output_boxes.allocator()->allocate();
    output_classes.allocator()->allocate();
    output_scores.allocator()->allocate();
    num_detection.allocator()->allocate();

    // Run the kernel
    detection.run();

    // Validate against the expected output
    // Validate output boxes
    validate(Accessor(output_boxes), expected_output_boxes, tolerance_boxes);
    // Validate detection classes
    validate(Accessor(output_classes), expected_output_classes, tolerance_others);
    // Validate detection scores
    validate(Accessor(output_scores), expected_output_scores, tolerance_others);
    // Validate num detections
    validate(Accessor(num_detection), expected_num_detection, tolerance_others);
}
} // namespace

TEST_SUITE(NEON)
TEST_SUITE(DetectionPostProcessLayer)

// *INDENT-OFF*
// clang-format off
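// Each column below pairs one combination of tensor metadata and layer info with the status that
// NEDetectionPostProcessLayer::validate() is expected to report; only the first combination is valid.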
DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(zip(zip(zip(
               framework::dataset::make("BoxEncodingsInfo", { TensorInfo(TensorShape(4U, 10U, 1U), 1, DataType::F32),
                                                              TensorInfo(TensorShape(4U, 10U, 3U), 1, DataType::F32),    // Mismatching batch_size
                                                              TensorInfo(TensorShape(4U, 10U, 1U), 1, DataType::S8),     // Unsupported data type
                                                              TensorInfo(TensorShape(4U, 10U, 1U), 1, DataType::F32),    // Wrong Detection Info
                                                              TensorInfo(TensorShape(4U, 10U, 1U), 1, DataType::F32),    // Wrong boxes dimensions
                                                              TensorInfo(TensorShape(4U, 10U, 1U), 1, DataType::QASYMM8)}), // Wrong score dimension
               framework::dataset::make("ClassPredsInfo",{ TensorInfo(TensorShape(3U, 10U), 1, DataType::F32),
                                                           TensorInfo(TensorShape(3U, 10U), 1, DataType::F32),
                                                           TensorInfo(TensorShape(3U, 10U), 1, DataType::F32),
                                                           TensorInfo(TensorShape(3U, 10U), 1, DataType::F32),
                                                           TensorInfo(TensorShape(3U, 10U), 1, DataType::F32),
                                                           TensorInfo(TensorShape(3U, 10U), 1, DataType::QASYMM8)})),
               framework::dataset::make("AnchorsInfo",{ TensorInfo(TensorShape(4U, 10U, 1U), 1, DataType::F32),
                                                        TensorInfo(TensorShape(4U, 10U, 1U), 1, DataType::F32),
                                                        TensorInfo(TensorShape(4U, 10U, 1U), 1, DataType::F32),
                                                        TensorInfo(TensorShape(4U, 10U, 1U), 1, DataType::F32),
                                                        TensorInfo(TensorShape(4U, 10U, 1U), 1, DataType::F32),
                                                        TensorInfo(TensorShape(4U, 10U, 1U), 1, DataType::QASYMM8)})),
               framework::dataset::make("OutputBoxInfo", { TensorInfo(TensorShape(4U, 3U, 1U), 1, DataType::F32),
                                                           TensorInfo(TensorShape(4U, 3U, 1U), 1, DataType::F32),
                                                           TensorInfo(TensorShape(4U, 3U, 1U), 1, DataType::S8),
                                                           TensorInfo(TensorShape(4U, 3U, 1U), 1, DataType::F32),
                                                           TensorInfo(TensorShape(1U, 5U, 1U), 1, DataType::F32),
                                                           TensorInfo(TensorShape(4U, 3U, 1U), 1, DataType::F32)})),
               framework::dataset::make("OutputClassesInfo",{ TensorInfo(TensorShape(3U, 1U), 1, DataType::F32),
                                                              TensorInfo(TensorShape(3U, 1U), 1, DataType::F32),
                                                              TensorInfo(TensorShape(3U, 1U), 1, DataType::F32),
                                                              TensorInfo(TensorShape(3U, 1U), 1, DataType::F32),
                                                              TensorInfo(TensorShape(3U, 1U), 1, DataType::F32),
                                                              TensorInfo(TensorShape(6U, 1U), 1, DataType::F32)})),
               framework::dataset::make("OutputScoresInfo",{ TensorInfo(TensorShape(3U, 1U), 1, DataType::F32),
                                                             TensorInfo(TensorShape(3U, 1U), 1, DataType::F32),
                                                             TensorInfo(TensorShape(3U, 1U), 1, DataType::F32),
                                                             TensorInfo(TensorShape(3U, 1U), 1, DataType::F32),
                                                             TensorInfo(TensorShape(3U, 1U), 1, DataType::F32),
                                                             TensorInfo(TensorShape(6U, 1U), 1, DataType::F32)})),
               framework::dataset::make("NumDetectionsInfo",{ TensorInfo(TensorShape(1U), 1, DataType::F32),
                                                              TensorInfo(TensorShape(1U), 1, DataType::F32),
                                                              TensorInfo(TensorShape(1U), 1, DataType::F32),
                                                              TensorInfo(TensorShape(1U), 1, DataType::F32),
                                                              TensorInfo(TensorShape(1U), 1, DataType::F32),
                                                              TensorInfo(TensorShape(1U), 1, DataType::F32)})),
               framework::dataset::make("DetectionPostProcessLayerInfo",{ DetectionPostProcessLayerInfo(3, 1, 0.0f, 0.5f, 2, {0.1f,0.1f,0.1f,0.1f}),
                                                                          DetectionPostProcessLayerInfo(3, 1, 0.0f, 0.5f, 2, {0.1f,0.1f,0.1f,0.1f}),
                                                                          DetectionPostProcessLayerInfo(3, 1, 0.0f, 0.5f, 2, {0.1f,0.1f,0.1f,0.1f}),
                                                                          DetectionPostProcessLayerInfo(3, 1, 0.0f, 1.5f, 2, {0.0f,0.1f,0.1f,0.1f}),
                                                                          DetectionPostProcessLayerInfo(3, 1, 0.0f, 0.5f, 2, {0.1f,0.1f,0.1f,0.1f}),
                                                                          DetectionPostProcessLayerInfo(3, 1, 0.0f, 0.5f, 2, {0.1f,0.1f,0.1f,0.1f})})),
               framework::dataset::make("Expected", {true, false, false, false, false, false })),
               box_encodings_info, classes_info, anchors_info, output_boxes_info, output_classes_info, output_scores_info, num_detection_info, detect_info, expected)
{
    const Status status = NEDetectionPostProcessLayer::validate(&box_encodings_info.clone()->set_is_resizable(false),
                                                                &classes_info.clone()->set_is_resizable(false),
                                                                &anchors_info.clone()->set_is_resizable(false),
                                                                &output_boxes_info.clone()->set_is_resizable(false),
                                                                &output_classes_info.clone()->set_is_resizable(false),
                                                                &output_scores_info.clone()->set_is_resizable(false), &num_detection_info.clone()->set_is_resizable(false), detect_info);
    ARM_COMPUTE_EXPECT(bool(status) == expected, framework::LogLevel::ERRORS);
}
// clang-format on
// *INDENT-ON*

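// The functional tests below push the fixed six-box example through the operator: the "general" cases rely on the
// DetectionPostProcessLayerInfo constructor defaults, "fast" explicitly disables regular (per-class) NMS, and
// "regular" enables it.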
TEST_SUITE(F32)
TEST_CASE(Float_general, framework::DatasetMode::ALL)
{
    DetectionPostProcessLayerInfo info = DetectionPostProcessLayerInfo(3 /*max_detections*/, 1 /*max_classes_per_detection*/, 0.0 /*nms_score_threshold*/,
                                                                       0.5 /*nms_iou_threshold*/, 2 /*num_classes*/, { 11.0, 11.0, 6.0, 6.0 } /*scale*/);
    // Fill expected detection boxes
    SimpleTensor<float> expected_output_boxes(TensorShape(4U, 3U), DataType::F32);
    fill_tensor(expected_output_boxes, std::vector<float> { -0.15, 9.85, 0.95, 10.95, -0.15, -0.15, 0.95, 0.95, -0.15, 99.85, 0.95, 100.95 });
    // Fill expected detection classes
    SimpleTensor<float> expected_output_classes(TensorShape(3U), DataType::F32);
    fill_tensor(expected_output_classes, std::vector<float> { 1.0f, 0.0f, 0.0f });
    // Fill expected detection scores
    SimpleTensor<float> expected_output_scores(TensorShape(3U), DataType::F32);
    fill_tensor(expected_output_scores, std::vector<float> { 0.97f, 0.95f, 0.31f });
    // Fill expected num detections
    SimpleTensor<float> expected_num_detection(TensorShape(1U), DataType::F32);
    fill_tensor(expected_num_detection, std::vector<float> { 3.f });
    // Run base test
    base_test_case(info, DataType::F32, expected_output_boxes, expected_output_classes, expected_output_scores, expected_num_detection);
}

TEST_CASE(Float_fast, framework::DatasetMode::ALL)
{
    DetectionPostProcessLayerInfo info = DetectionPostProcessLayerInfo(3 /*max_detections*/, 1 /*max_classes_per_detection*/, 0.0 /*nms_score_threshold*/,
                                                                       0.5 /*nms_iou_threshold*/, 2 /*num_classes*/, { 11.0, 11.0, 6.0, 6.0 } /*scale*/,
                                                                       false /*use_regular_nms*/, 1 /*detections_per_class*/);

    // Fill expected detection boxes
    SimpleTensor<float> expected_output_boxes(TensorShape(4U, 3U), DataType::F32);
    fill_tensor(expected_output_boxes, std::vector<float> { -0.15, 9.85, 0.95, 10.95, -0.15, -0.15, 0.95, 0.95, -0.15, 99.85, 0.95, 100.95 });
    // Fill expected detection classes
    SimpleTensor<float> expected_output_classes(TensorShape(3U), DataType::F32);
    fill_tensor(expected_output_classes, std::vector<float> { 1.0f, 0.0f, 0.0f });
    // Fill expected detection scores
    SimpleTensor<float> expected_output_scores(TensorShape(3U), DataType::F32);
    fill_tensor(expected_output_scores, std::vector<float> { 0.97f, 0.95f, 0.31f });
    // Fill expected num detections
    SimpleTensor<float> expected_num_detection(TensorShape(1U), DataType::F32);
    fill_tensor(expected_num_detection, std::vector<float> { 3.f });

    // Run base test
    base_test_case(info, DataType::F32, expected_output_boxes, expected_output_classes, expected_output_scores, expected_num_detection);
}

TEST_CASE(Float_regular, framework::DatasetMode::ALL)
{
    DetectionPostProcessLayerInfo info = DetectionPostProcessLayerInfo(3 /*max_detections*/, 1 /*max_classes_per_detection*/, 0.0 /*nms_score_threshold*/,
                                                                       0.5 /*nms_iou_threshold*/, 2 /*num_classes*/, { 11.0, 11.0, 6.0, 6.0 } /*scale*/,
                                                                       true /*use_regular_nms*/, 1 /*detections_per_class*/);

    // Fill expected detection boxes
    SimpleTensor<float> expected_output_boxes(TensorShape(4U, 3U), DataType::F32);
    fill_tensor(expected_output_boxes, std::vector<float> { -0.15, 9.85, 0.95, 10.95, -0.15, 9.85, 0.95, 10.95, 0.0f, 0.0f, 0.0f, 0.0f });
    // Fill expected detection classes
    SimpleTensor<float> expected_output_classes(TensorShape(3U), DataType::F32);
    fill_tensor(expected_output_classes, std::vector<float> { 1.0f, 0.0f, 0.0f });
    // Fill expected detection scores
    SimpleTensor<float> expected_output_scores(TensorShape(3U), DataType::F32);
    fill_tensor(expected_output_scores, std::vector<float> { 0.97f, 0.91f, 0.0f });
    // Fill expected num detections
    SimpleTensor<float> expected_num_detection(TensorShape(1U), DataType::F32);
    fill_tensor(expected_num_detection, std::vector<float> { 2.f });

    // Run test
    base_test_case(info, DataType::F32, expected_output_boxes, expected_output_classes, expected_output_scores, expected_num_detection);
}
TEST_SUITE_END() // F32

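// Same scenarios with QASYMM8 inputs quantized through quantize_and_fill_tensor; the box tolerance is
// relaxed to 0.3 to absorb quantization error.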
TEST_SUITE(QASYMM8)
TEST_CASE(Quantized_general, framework::DatasetMode::ALL)
{
    DetectionPostProcessLayerInfo info = DetectionPostProcessLayerInfo(3 /*max_detections*/, 1 /*max_classes_per_detection*/, 0.0 /*nms_score_threshold*/,
                                                                       0.5 /*nms_iou_threshold*/, 2 /*num_classes*/, { 11.0, 11.0, 6.0, 6.0 } /*scale*/);

    // Fill expected detection boxes
    SimpleTensor<float> expected_output_boxes(TensorShape(4U, 3U), DataType::F32);
    fill_tensor(expected_output_boxes, std::vector<float> { -0.15, 9.85, 0.95, 10.95, -0.15, -0.15, 0.95, 0.95, -0.15, 99.85, 0.95, 100.95 });
    // Fill expected detection classes
    SimpleTensor<float> expected_output_classes(TensorShape(3U), DataType::F32);
    fill_tensor(expected_output_classes, std::vector<float> { 1.0f, 0.0f, 0.0f });
    // Fill expected detection scores
    SimpleTensor<float> expected_output_scores(TensorShape(3U), DataType::F32);
    fill_tensor(expected_output_scores, std::vector<float> { 0.97f, 0.95f, 0.31f });
    // Fill expected num detections
    SimpleTensor<float> expected_num_detection(TensorShape(1U), DataType::F32);
    fill_tensor(expected_num_detection, std::vector<float> { 3.f });
    // Run test
    base_test_case(info, DataType::QASYMM8, expected_output_boxes, expected_output_classes, expected_output_scores, expected_num_detection, AbsoluteTolerance<float>(0.3f));
}

TEST_CASE(Quantized_fast, framework::DatasetMode::ALL)
{
    DetectionPostProcessLayerInfo info = DetectionPostProcessLayerInfo(3 /*max_detections*/, 1 /*max_classes_per_detection*/, 0.0 /*nms_score_threshold*/,
                                                                       0.5 /*nms_iou_threshold*/, 2 /*num_classes*/, { 11.0, 11.0, 6.0, 6.0 } /*scale*/,
                                                                       false /*use_regular_nms*/, 1 /*detections_per_class*/);

    // Fill expected detection boxes
    SimpleTensor<float> expected_output_boxes(TensorShape(4U, 3U), DataType::F32);
    fill_tensor(expected_output_boxes, std::vector<float> { -0.15, 9.85, 0.95, 10.95, -0.15, -0.15, 0.95, 0.95, -0.15, 99.85, 0.95, 100.95 });
    // Fill expected detection classes
    SimpleTensor<float> expected_output_classes(TensorShape(3U), DataType::F32);
    fill_tensor(expected_output_classes, std::vector<float> { 1.0f, 0.0f, 0.0f });
    // Fill expected detection scores
    SimpleTensor<float> expected_output_scores(TensorShape(3U), DataType::F32);
    fill_tensor(expected_output_scores, std::vector<float> { 0.97f, 0.95f, 0.31f });
    // Fill expected num detections
    SimpleTensor<float> expected_num_detection(TensorShape(1U), DataType::F32);
    fill_tensor(expected_num_detection, std::vector<float> { 3.f });

    // Run base test
    base_test_case(info, DataType::QASYMM8, expected_output_boxes, expected_output_classes, expected_output_scores, expected_num_detection, AbsoluteTolerance<float>(0.3f));
}

TEST_CASE(Quantized_regular, framework::DatasetMode::ALL)
{
    DetectionPostProcessLayerInfo info = DetectionPostProcessLayerInfo(3 /*max_detections*/, 1 /*max_classes_per_detection*/, 0.0 /*nms_score_threshold*/,
                                                                       0.5 /*nms_iou_threshold*/, 2 /*num_classes*/, { 11.0, 11.0, 6.0, 6.0 } /*scale*/,
                                                                       true /*use_regular_nms*/, 1 /*detections_per_class*/);
    // Fill expected detection boxes
    SimpleTensor<float> expected_output_boxes(TensorShape(4U, 3U), DataType::F32);
    fill_tensor(expected_output_boxes, std::vector<float> { -0.15, 9.85, 0.95, 10.95, -0.15, 9.85, 0.95, 10.95, 0.0f, 0.0f, 0.0f, 0.0f });
    // Fill expected detection classes
    SimpleTensor<float> expected_output_classes(TensorShape(3U), DataType::F32);
    fill_tensor(expected_output_classes, std::vector<float> { 1.0f, 0.0f, 0.0f });
    // Fill expected detection scores
    SimpleTensor<float> expected_output_scores(TensorShape(3U), DataType::F32);
    fill_tensor(expected_output_scores, std::vector<float> { 0.95f, 0.91f, 0.0f });
    // Fill expected num detections
    SimpleTensor<float> expected_num_detection(TensorShape(1U), DataType::F32);
    fill_tensor(expected_num_detection, std::vector<float> { 2.f });

    // Run test
    base_test_case(info, DataType::QASYMM8, expected_output_boxes, expected_output_classes, expected_output_scores, expected_num_detection, AbsoluteTolerance<float>(0.3f));
}

TEST_SUITE_END() // QASYMM8

TEST_SUITE_END() // DetectionPostProcessLayer
TEST_SUITE_END() // Neon
} // namespace validation
} // namespace test
} // namespace arm_compute