CPPDetectionOutputLayer.cpp
/*
 * Copyright (c) 2018-2021 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/runtime/CPP/functions/CPPDetectionOutputLayer.h"

#include "arm_compute/core/Error.h"
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/Validate.h"

#include "src/common/utils/Log.h"
#include "src/core/helpers/AutoConfiguration.h"

#include <list>

namespace arm_compute
{
namespace
{
Status validate_arguments(const ITensorInfo       *input_loc,
                          const ITensorInfo       *input_conf,
                          const ITensorInfo       *input_priorbox,
                          const ITensorInfo       *output,
                          DetectionOutputLayerInfo info)
{
    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input_loc, input_conf, input_priorbox, output);
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input_loc, 1, DataType::F32);
    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input_loc, input_conf, input_priorbox);
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(input_loc->num_dimensions() > 2, "The location input tensor should be [C1, N].");
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(input_conf->num_dimensions() > 2, "The confidence input tensor should be [C2, N].");
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(input_priorbox->num_dimensions() > 3,
                                    "The priorbox input tensor should be [C3, 2, N].");

    ARM_COMPUTE_RETURN_ERROR_ON_MSG(info.eta() <= 0.f || info.eta() > 1.f, "Eta should be between 0 and 1");

    const int num_priors = input_priorbox->tensor_shape()[0] / 4;
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(static_cast<size_t>((num_priors * info.num_loc_classes() * 4)) !=
                                        input_loc->tensor_shape()[0],
                                    "Number of priors must match number of location predictions.");
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(static_cast<size_t>((num_priors * info.num_classes())) !=
                                        input_conf->tensor_shape()[0],
                                    "Number of priors must match number of confidence predictions.");

    // Validate configured output
    if (output->total_size() != 0)
    {
        const unsigned int max_size =
            info.keep_top_k() * (input_loc->num_dimensions() > 1 ? input_loc->dimension(1) : 1);
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(output->tensor_shape(), TensorShape(7U, max_size));
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input_loc, output);
    }

    return Status{};
}

/** Function used to sort pair<float, T> in descending order based on the score (first) value.
 */
template <typename T>
bool SortScorePairDescend(const std::pair<float, T> &pair1, const std::pair<float, T> &pair2)
{
    return pair1.first > pair2.first;
}

/** Get location predictions from input_loc.
 *
 * @param[in]  input_loc                The input location prediction.
 * @param[in]  num                      The number of images.
 * @param[in]  num_priors               Number of predictions per class.
 * @param[in]  num_loc_classes          Number of location classes. It is 1 if share_location is true,
 *                                      and is equal to number of classes needed to predict otherwise.
 * @param[in]  share_location           If true, all classes share the same location prediction.
 * @param[out] all_location_predictions All the location predictions.
 *
 */
void retrieve_all_loc_predictions(const ITensor          *input_loc,
                                  const int               num,
                                  const int               num_priors,
                                  const int               num_loc_classes,
                                  const bool              share_location,
                                  std::vector<LabelBBox> &all_location_predictions)
{
    for (int i = 0; i < num; ++i)
    {
        for (int c = 0; c < num_loc_classes; ++c)
        {
            int label = share_location ? -1 : c;
            if (all_location_predictions[i].find(label) == all_location_predictions[i].end())
            {
                all_location_predictions[i][label].resize(num_priors);
            }
            else
            {
                ARM_COMPUTE_ERROR_ON(all_location_predictions[i][label].size() != static_cast<size_t>(num_priors));
                break;
            }
        }
    }
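    // The location input stores 4 box coordinates per (prior, class) pair with the class index
    // innermost: coordinate k of (image i, prior p, class c) sits at offset
    // ((i * num_priors + p) * num_loc_classes + c) * 4 + k, as computed in base_ptr below.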
    for (int i = 0; i < num; ++i)
    {
        for (int p = 0; p < num_priors; ++p)
        {
            for (int c = 0; c < num_loc_classes; ++c)
            {
                const int label    = share_location ? -1 : c;
                const int base_ptr = i * num_priors * num_loc_classes * 4 + p * num_loc_classes * 4 + c * 4;
                // xmin, ymin, xmax, ymax
                all_location_predictions[i][label][p][0] =
                    *reinterpret_cast<float *>(input_loc->ptr_to_element(Coordinates(base_ptr)));
                all_location_predictions[i][label][p][1] =
                    *reinterpret_cast<float *>(input_loc->ptr_to_element(Coordinates(base_ptr + 1)));
                all_location_predictions[i][label][p][2] =
                    *reinterpret_cast<float *>(input_loc->ptr_to_element(Coordinates(base_ptr + 2)));
                all_location_predictions[i][label][p][3] =
                    *reinterpret_cast<float *>(input_loc->ptr_to_element(Coordinates(base_ptr + 3)));
            }
        }
    }
}

/** Get confidence predictions from input_conf.
 *
 * @param[in]  input_conf            The input confidence prediction.
 * @param[in]  num                   The number of images.
 * @param[in]  num_priors            Number of predictions per class.
 * @param[in]  num_classes           Number of classes.
 * @param[out] all_confidence_scores All the confidence scores.
 *
 */
void retrieve_all_conf_scores(const ITensor                                  *input_conf,
                              const int                                       num,
                              const int                                       num_priors,
                              const int                                       num_classes,
                              std::vector<std::map<int, std::vector<float>>> &all_confidence_scores)
{
    std::vector<float> tmp_buffer;
    tmp_buffer.resize(num * num_priors * num_classes);
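    // The confidence input stores scores with the class index innermost per prior
    // (offset i * num_classes * num_priors + p * num_classes + c); the first pass below
    // transposes them into tmp_buffer so that each class owns a contiguous run of num_priors scores.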
    for (int i = 0; i < num; ++i)
    {
        for (int c = 0; c < num_classes; ++c)
        {
            for (int p = 0; p < num_priors; ++p)
            {
                tmp_buffer[i * num_classes * num_priors + c * num_priors + p] = *reinterpret_cast<float *>(
                    input_conf->ptr_to_element(Coordinates(i * num_classes * num_priors + p * num_classes + c)));
            }
        }
    }
    for (int i = 0; i < num; ++i)
    {
        for (int c = 0; c < num_classes; ++c)
        {
            all_confidence_scores[i][c].resize(num_priors);
            all_confidence_scores[i][c].assign(&tmp_buffer[i * num_classes * num_priors + c * num_priors],
                                               &tmp_buffer[i * num_classes * num_priors + c * num_priors + num_priors]);
        }
    }
}

/** Get prior boxes from input_priorbox.
 *
 * @param[in]  input_priorbox      The input prior boxes.
 * @param[in]  num_priors          Number of priors.
 * @param[out] all_prior_bboxes    All the prior bounding boxes.
 * @param[out] all_prior_variances All the prior bounding box variances.
 *
 */
void retrieve_all_priorbox(const ITensor                     *input_priorbox,
                           const int                          num_priors,
                           std::vector<BBox>                 &all_prior_bboxes,
                           std::vector<std::array<float, 4>> &all_prior_variances)
{
    for (int i = 0; i < num_priors; ++i)
    {
        all_prior_bboxes[i] = {{*reinterpret_cast<float *>(input_priorbox->ptr_to_element(Coordinates(i * 4))),
                                *reinterpret_cast<float *>(input_priorbox->ptr_to_element(Coordinates(i * 4 + 1))),
                                *reinterpret_cast<float *>(input_priorbox->ptr_to_element(Coordinates(i * 4 + 2))),
                                *reinterpret_cast<float *>(input_priorbox->ptr_to_element(Coordinates(i * 4 + 3)))}};
    }

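    // The priorbox input holds the 4 corner coordinates of all num_priors boxes first,
    // followed by a second block of num_priors * 4 variance values, hence the
    // (num_priors + i) * 4 offset used below.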
    std::array<float, 4> var({{0, 0, 0, 0}});
    for (int i = 0; i < num_priors; ++i)
    {
        for (int j = 0; j < 4; ++j)
        {
            var[j] = *reinterpret_cast<float *>(input_priorbox->ptr_to_element(Coordinates((num_priors + i) * 4 + j)));
        }
        all_prior_variances[i] = var;
    }
}

/** Decode a bbox according to a prior bbox.
 *
 * @param[in]  prior_bbox                 The input prior bounding boxes.
 * @param[in]  prior_variance             The corresponding input variance.
 * @param[in]  code_type                  The detection output code type used to decode the results.
 * @param[in]  variance_encoded_in_target If true, the variance is encoded in target.
 * @param[in]  clip_bbox                  If true, the results should be between 0.f and 1.f.
 * @param[in]  bbox                       The input bbox to decode.
 * @param[out] decode_bbox                The decoded bboxes.
 *
 */
void DecodeBBox(const BBox                        &prior_bbox,
                const std::array<float, 4>        &prior_variance,
                const DetectionOutputLayerCodeType code_type,
                const bool                         variance_encoded_in_target,
                const bool                         clip_bbox,
                const BBox                        &bbox,
                BBox                              &decode_bbox)
{
    // If the variance is encoded in target, we simply need to add the offset predictions,
    // otherwise we need to scale the offset accordingly.
    switch (code_type)
    {
        case DetectionOutputLayerCodeType::CORNER:
        {
            decode_bbox[0] = prior_bbox[0] + (variance_encoded_in_target ? bbox[0] : prior_variance[0] * bbox[0]);
            decode_bbox[1] = prior_bbox[1] + (variance_encoded_in_target ? bbox[1] : prior_variance[1] * bbox[1]);
            decode_bbox[2] = prior_bbox[2] + (variance_encoded_in_target ? bbox[2] : prior_variance[2] * bbox[2]);
            decode_bbox[3] = prior_bbox[3] + (variance_encoded_in_target ? bbox[3] : prior_variance[3] * bbox[3]);

            break;
        }
        case DetectionOutputLayerCodeType::CENTER_SIZE:
        {
            const float prior_width  = prior_bbox[2] - prior_bbox[0];
            const float prior_height = prior_bbox[3] - prior_bbox[1];

            // Check if the prior width and height are valid
            ARM_COMPUTE_ERROR_ON(prior_width <= 0.f);
            ARM_COMPUTE_ERROR_ON(prior_height <= 0.f);

            const float prior_center_x = (prior_bbox[0] + prior_bbox[2]) / 2.;
            const float prior_center_y = (prior_bbox[1] + prior_bbox[3]) / 2.;

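            // CENTER_SIZE decoding: the predicted offsets (tx, ty, tw, th) in bbox are interpreted
            // relative to the prior's centre and size, scaled by the variances (vx, vy, vw, vh):
            //   cx = prior_cx + vx * tx * prior_w,  cy = prior_cy + vy * ty * prior_h
            //   w  = prior_w * exp(vw * tw),        h  = prior_h * exp(vh * th)
            // The variances are dropped when variance_encoded_in_target is true.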
            const float decode_bbox_center_x =
                (variance_encoded_in_target ? bbox[0] : prior_variance[0] * bbox[0]) * prior_width + prior_center_x;
            const float decode_bbox_center_y =
                (variance_encoded_in_target ? bbox[1] : prior_variance[1] * bbox[1]) * prior_height + prior_center_y;
            const float decode_bbox_width =
                (variance_encoded_in_target ? std::exp(bbox[2]) : std::exp(prior_variance[2] * bbox[2])) * prior_width;
            const float decode_bbox_height =
                (variance_encoded_in_target ? std::exp(bbox[3]) : std::exp(prior_variance[3] * bbox[3])) * prior_height;

            decode_bbox[0] = (decode_bbox_center_x - decode_bbox_width / 2.f);
            decode_bbox[1] = (decode_bbox_center_y - decode_bbox_height / 2.f);
            decode_bbox[2] = (decode_bbox_center_x + decode_bbox_width / 2.f);
            decode_bbox[3] = (decode_bbox_center_y + decode_bbox_height / 2.f);

            break;
        }
        case DetectionOutputLayerCodeType::CORNER_SIZE:
        {
            const float prior_width  = prior_bbox[2] - prior_bbox[0];
            const float prior_height = prior_bbox[3] - prior_bbox[1];

            // Check if the prior width and height are greater than 0
            ARM_COMPUTE_ERROR_ON(prior_width <= 0.f);
            ARM_COMPUTE_ERROR_ON(prior_height <= 0.f);

            decode_bbox[0] =
                prior_bbox[0] + (variance_encoded_in_target ? bbox[0] : prior_variance[0] * bbox[0]) * prior_width;
            decode_bbox[1] =
                prior_bbox[1] + (variance_encoded_in_target ? bbox[1] : prior_variance[1] * bbox[1]) * prior_height;
            decode_bbox[2] =
                prior_bbox[2] + (variance_encoded_in_target ? bbox[2] : prior_variance[2] * bbox[2]) * prior_width;
            decode_bbox[3] =
                prior_bbox[3] + (variance_encoded_in_target ? bbox[3] : prior_variance[3] * bbox[3]) * prior_height;

            break;
        }
        default:
            ARM_COMPUTE_ERROR("Unsupported Detection Output Code Type.");
    }

    if (clip_bbox)
    {
        for (auto &d_bbox : decode_bbox)
        {
            d_bbox = utility::clamp(d_bbox, 0.f, 1.f);
        }
    }
}

/** Do non maximum suppression given bboxes and scores.
 *
 * @param[in]  bboxes          The input bounding boxes.
 * @param[in]  scores          The corresponding input confidence.
 * @param[in]  score_threshold The threshold used to filter detection results.
 * @param[in]  nms_threshold   The threshold used in non maximum suppression.
 * @param[in]  eta             Adaptation rate for nms threshold.
 * @param[in]  top_k           If not -1, keep at most top_k picked indices.
 * @param[out] indices         The kept indices of bboxes after nms.
 *
 */
void ApplyNMSFast(const std::vector<BBox>  &bboxes,
                  const std::vector<float> &scores,
                  const float               score_threshold,
                  const float               nms_threshold,
                  const float               eta,
                  const int                 top_k,
                  std::vector<int>         &indices)
{
    ARM_COMPUTE_ERROR_ON_MSG(bboxes.size() != scores.size(), "bboxes and scores have different size.");

    // Get top_k scores (with corresponding indices).
    std::list<std::pair<float, int>> score_index_vec;

    // Generate index score pairs.
    for (size_t i = 0; i < scores.size(); ++i)
    {
        if (scores[i] > score_threshold)
        {
            score_index_vec.emplace_back(std::make_pair(scores[i], i));
        }
    }

    // Sort the score pair according to the scores in descending order
    score_index_vec.sort(SortScorePairDescend<int>);

    // Keep top_k scores if needed.
    const int score_index_vec_size = score_index_vec.size();
    if (top_k > -1 && top_k < score_index_vec_size)
    {
        score_index_vec.resize(top_k);
    }

    // Do nms.
    float adaptive_threshold = nms_threshold;
    indices.clear();

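    // Greedy suppression: take the highest-scoring remaining candidate and keep it only if its
    // overlap (IoU) with every box already kept stays at or below adaptive_threshold.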
    while (!score_index_vec.empty())
    {
        const int idx  = score_index_vec.front().second;
        bool      keep = true;
        for (int kept_idx : indices)
        {
            if (keep)
            {
                // Compute the jaccard (intersection over union IoU) overlap between two bboxes.
                BBox intersect_bbox = std::array<float, 4>({0, 0, 0, 0});
                if (bboxes[kept_idx][0] > bboxes[idx][2] || bboxes[kept_idx][2] < bboxes[idx][0] ||
                    bboxes[kept_idx][1] > bboxes[idx][3] || bboxes[kept_idx][3] < bboxes[idx][1])
                {
                    intersect_bbox = std::array<float, 4>({{0, 0, 0, 0}});
                }
                else
                {
                    intersect_bbox = std::array<float, 4>(
                        {{std::max(bboxes[idx][0], bboxes[kept_idx][0]), std::max(bboxes[idx][1], bboxes[kept_idx][1]),
                          std::min(bboxes[idx][2], bboxes[kept_idx][2]),
                          std::min(bboxes[idx][3], bboxes[kept_idx][3])}});
                }

                float intersect_width  = intersect_bbox[2] - intersect_bbox[0];
                float intersect_height = intersect_bbox[3] - intersect_bbox[1];

                float overlap = 0.f;
                if (intersect_width > 0 && intersect_height > 0)
                {
                    float intersect_size = intersect_width * intersect_height;
                    float bbox1_size     = (bboxes[idx][2] < bboxes[idx][0] || bboxes[idx][3] < bboxes[idx][1])
                                               ? 0.f
                                               : (bboxes[idx][2] - bboxes[idx][0]) *
                                                     (bboxes[idx][3] - bboxes[idx][1]); // BBoxSize(bboxes[idx])
                    float bbox2_size =
                        (bboxes[kept_idx][2] < bboxes[kept_idx][0] || bboxes[kept_idx][3] < bboxes[kept_idx][1])
                            ? 0.f
                            : (bboxes[kept_idx][2] - bboxes[kept_idx][0]) *
                                  (bboxes[kept_idx][3] - bboxes[kept_idx][1]); // BBoxSize(bboxes[kept_idx])
                    overlap = intersect_size / (bbox1_size + bbox2_size - intersect_size);
                }
                keep = (overlap <= adaptive_threshold);
            }
            else
            {
                break;
            }
        }
        if (keep)
        {
            indices.push_back(idx);
        }
        score_index_vec.erase(score_index_vec.begin());
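        // When eta < 1, tighten the IoU threshold after each kept box; the adaptation only
        // applies while the threshold is still above 0.5f.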
        if (keep && eta < 1.f && adaptive_threshold > 0.5f)
        {
            adaptive_threshold *= eta;
        }
    }
}
} // namespace

CPPDetectionOutputLayer::CPPDetectionOutputLayer()
    : _input_loc(nullptr),
      _input_conf(nullptr),
      _input_priorbox(nullptr),
      _output(nullptr),
      _info(),
      _num_priors(),
      _num(),
      _all_location_predictions(),
      _all_confidence_scores(),
      _all_prior_bboxes(),
      _all_prior_variances(),
      _all_decode_bboxes(),
      _all_indices()
{
}

void CPPDetectionOutputLayer::configure(const ITensor           *input_loc,
                                        const ITensor           *input_conf,
                                        const ITensor           *input_priorbox,
                                        ITensor                 *output,
                                        DetectionOutputLayerInfo info)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(input_loc, input_conf, input_priorbox, output);
    ARM_COMPUTE_LOG_PARAMS(input_loc, input_conf, input_priorbox, output, info);

    // Output auto initialization if not yet initialized.
    // Since the number of bboxes to keep is unknown before nms, the shape is set to the maximum.
    // The maximum is keep_top_k * input_loc_size[1].
    // Each row is a 7-element vector which stores [image_id, label, confidence, xmin, ymin, xmax, ymax].
    const unsigned int max_size =
        info.keep_top_k() * (input_loc->info()->num_dimensions() > 1 ? input_loc->info()->dimension(1) : 1);
    auto_init_if_empty(*output->info(), input_loc->info()->clone()->set_tensor_shape(TensorShape(7U, max_size)));

    // Perform validation step
    ARM_COMPUTE_ERROR_THROW_ON(
        validate_arguments(input_loc->info(), input_conf->info(), input_priorbox->info(), output->info(), info));

    _input_loc      = input_loc;
    _input_conf     = input_conf;
    _input_priorbox = input_priorbox;
    _output         = output;
    _info           = info;
    _num_priors     = input_priorbox->info()->dimension(0) / 4;
    _num            = (_input_loc->info()->num_dimensions() > 1 ? _input_loc->info()->dimension(1) : 1);

    _all_location_predictions.resize(_num);
    _all_confidence_scores.resize(_num);
    _all_prior_bboxes.resize(_num_priors);
    _all_prior_variances.resize(_num_priors);
    _all_decode_bboxes.resize(_num);

    for (int i = 0; i < _num; ++i)
    {
        for (int c = 0; c < _info.num_loc_classes(); ++c)
        {
            const int label = _info.share_location() ? -1 : c;
            if (label == _info.background_label_id())
            {
                // Ignore background class.
                continue;
            }
            _all_decode_bboxes[i][label].resize(_num_priors);
        }
    }
    _all_indices.resize(_num);

    Coordinates coord;
    coord.set_num_dimensions(output->info()->num_dimensions());
    output->info()->set_valid_region(ValidRegion(coord, output->info()->tensor_shape()));
}

Status CPPDetectionOutputLayer::validate(const ITensorInfo       *input_loc,
                                         const ITensorInfo       *input_conf,
                                         const ITensorInfo       *input_priorbox,
                                         const ITensorInfo       *output,
                                         DetectionOutputLayerInfo info)
{
    ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input_loc, input_conf, input_priorbox, output, info));
    return Status{};
}

void CPPDetectionOutputLayer::run()
{
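    // The detection output is computed in four stages: gather the raw location, confidence and
    // prior box data, decode every location prediction against its prior, run per-class NMS
    // followed by an optional cross-class keep_top_k selection, and finally write the surviving
    // detections to the output tensor.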
    // Retrieve all location predictions.
    retrieve_all_loc_predictions(_input_loc, _num, _num_priors, _info.num_loc_classes(), _info.share_location(),
                                 _all_location_predictions);

    // Retrieve all confidences.
    retrieve_all_conf_scores(_input_conf, _num, _num_priors, _info.num_classes(), _all_confidence_scores);

    // Retrieve all prior bboxes.
    retrieve_all_priorbox(_input_priorbox, _num_priors, _all_prior_bboxes, _all_prior_variances);

    // Decode all loc predictions to bboxes
    const bool clip_bbox = false;
    for (int i = 0; i < _num; ++i)
    {
        for (int c = 0; c < _info.num_loc_classes(); ++c)
        {
            const int label = _info.share_location() ? -1 : c;
            if (label == _info.background_label_id())
            {
                // Ignore background class.
                continue;
            }
            ARM_COMPUTE_ERROR_ON_MSG_VAR(_all_location_predictions[i].find(label) == _all_location_predictions[i].end(),
                                         "Could not find location predictions for label %d.", label);

            const std::vector<BBox> &label_loc_preds = _all_location_predictions[i].find(label)->second;

            const int num_bboxes = _all_prior_bboxes.size();
            ARM_COMPUTE_ERROR_ON(_all_prior_variances[i].size() != 4);

            for (int j = 0; j < num_bboxes; ++j)
            {
                DecodeBBox(_all_prior_bboxes[j], _all_prior_variances[j], _info.code_type(),
                           _info.variance_encoded_in_target(), clip_bbox, label_loc_preds[j],
                           _all_decode_bboxes[i][label][j]);
            }
        }
    }

    int num_kept = 0;

    for (int i = 0; i < _num; ++i)
    {
        const LabelBBox                         &decode_bboxes = _all_decode_bboxes[i];
        const std::map<int, std::vector<float>> &conf_scores   = _all_confidence_scores[i];

        std::map<int, std::vector<int>> indices;
        int                             num_det = 0;
        for (int c = 0; c < _info.num_classes(); ++c)
        {
            if (c == _info.background_label_id())
            {
                // Ignore background class
                continue;
            }
            const int label = _info.share_location() ? -1 : c;
            if (conf_scores.find(c) == conf_scores.end() || decode_bboxes.find(label) == decode_bboxes.end())
            {
                ARM_COMPUTE_ERROR_VAR("Could not find predictions for label %d.", label);
            }
            const std::vector<float> &scores = conf_scores.find(c)->second;
            const std::vector<BBox>  &bboxes = decode_bboxes.find(label)->second;

            ApplyNMSFast(bboxes, scores, _info.confidence_threshold(), _info.nms_threshold(), _info.eta(),
                         _info.top_k(), indices[c]);

            num_det += indices[c].size();
        }

        int num_to_add = 0;
        if (_info.keep_top_k() > -1 && num_det > _info.keep_top_k())
        {
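            // More detections survived per-class NMS than keep_top_k allows: pool the
            // (score, (label, index)) pairs from every class, sort them by score and keep
            // only the keep_top_k best for this image.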
            std::vector<std::pair<float, std::pair<int, int>>> score_index_pairs;
            for (auto const &it : indices)
            {
                const int               label         = it.first;
                const std::vector<int> &label_indices = it.second;

                if (conf_scores.find(label) == conf_scores.end())
                {
                    ARM_COMPUTE_ERROR_VAR("Could not find predictions for label %d.", label);
                }

                const std::vector<float> &scores = conf_scores.find(label)->second;
                for (auto idx : label_indices)
                {
                    ARM_COMPUTE_ERROR_ON(idx > static_cast<int>(scores.size()));
                    score_index_pairs.emplace_back(std::make_pair(scores[idx], std::make_pair(label, idx)));
                }
            }

            // Keep top k results per image.
            std::sort(score_index_pairs.begin(), score_index_pairs.end(), SortScorePairDescend<std::pair<int, int>>);
            score_index_pairs.resize(_info.keep_top_k());

            // Store the new indices.
            std::map<int, std::vector<int>> new_indices;
            for (auto score_index_pair : score_index_pairs)
            {
                int label = score_index_pair.second.first;
                int idx   = score_index_pair.second.second;
                new_indices[label].push_back(idx);
            }
            _all_indices[i] = new_indices;
            num_to_add      = _info.keep_top_k();
        }
        else
        {
            _all_indices[i] = indices;
            num_to_add      = num_det;
        }
        num_kept += num_to_add;
    }

    // Update the valid region of the output to mark the exact number of detections
    _output->info()->set_valid_region(ValidRegion(Coordinates(0, 0), TensorShape(7, num_kept)));

    int count = 0;
    for (int i = 0; i < _num; ++i)
    {
        const std::map<int, std::vector<float>> &conf_scores   = _all_confidence_scores[i];
        const LabelBBox                         &decode_bboxes = _all_decode_bboxes[i];
        for (auto &it : _all_indices[i])
        {
            const int label     = it.first;
            const int loc_label = _info.share_location() ? -1 : label;
            if (conf_scores.find(label) == conf_scores.end() || decode_bboxes.find(loc_label) == decode_bboxes.end())
            {
                // Either there are no confidence predictions
                // or there are no location predictions for the current label.
                ARM_COMPUTE_ERROR_VAR("Could not find predictions for the label %d.", label);
            }
            const std::vector<float> &scores  = conf_scores.find(label)->second;
            const std::vector<BBox>  &bboxes  = decode_bboxes.find(loc_label)->second;
            const std::vector<int>   &indices = it.second;

            for (auto idx : indices)
            {
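                // Each kept detection occupies one 7-element output row:
                // [image_id, label, confidence, xmin, ymin, xmax, ymax].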
                *(reinterpret_cast<float *>(_output->ptr_to_element(Coordinates(count * 7))))     = i;
                *(reinterpret_cast<float *>(_output->ptr_to_element(Coordinates(count * 7 + 1)))) = label;
                *(reinterpret_cast<float *>(_output->ptr_to_element(Coordinates(count * 7 + 2)))) = scores[idx];
                *(reinterpret_cast<float *>(_output->ptr_to_element(Coordinates(count * 7 + 3)))) = bboxes[idx][0];
                *(reinterpret_cast<float *>(_output->ptr_to_element(Coordinates(count * 7 + 4)))) = bboxes[idx][1];
                *(reinterpret_cast<float *>(_output->ptr_to_element(Coordinates(count * 7 + 5)))) = bboxes[idx][2];
                *(reinterpret_cast<float *>(_output->ptr_to_element(Coordinates(count * 7 + 6)))) = bboxes[idx][3];

                ++count;
            }
        }
    }
}
} // namespace arm_compute