Compute Library
 21.02
CPPDetectionOutputLayer.cpp
1 /*
2  * Copyright (c) 2018-2020 Arm Limited.
3  *
4  * SPDX-License-Identifier: MIT
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a copy
7  * of this software and associated documentation files (the "Software"), to
8  * deal in the Software without restriction, including without limitation the
9  * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10  * sell copies of the Software, and to permit persons to whom the Software is
11  * furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in all
14  * copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22  * SOFTWARE.
23  */
24 #include "arm_compute/runtime/CPP/functions/CPPDetectionOutputLayer.h"
25 
26 #include "arm_compute/core/Error.h"
27 #include "arm_compute/core/Helpers.h"
28 #include "arm_compute/core/Validate.h"
29 #include "src/core/helpers/AutoConfiguration.h"
30 
31 #include <list>
32 
33 namespace arm_compute
34 {
35 namespace
36 {
37 Status validate_arguments(const ITensorInfo *input_loc, const ITensorInfo *input_conf, const ITensorInfo *input_priorbox, const ITensorInfo *output, DetectionOutputLayerInfo info)
38 {
39  ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input_loc, input_conf, input_priorbox, output);
40  ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input_loc, 1, DataType::F32);
41  ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input_loc, input_conf, input_priorbox);
42  ARM_COMPUTE_RETURN_ERROR_ON_MSG(input_loc->num_dimensions() > 2, "The location input tensor should be [C1, N].");
43  ARM_COMPUTE_RETURN_ERROR_ON_MSG(input_conf->num_dimensions() > 2, "The confidence input tensor should be [C2, N].");
44  ARM_COMPUTE_RETURN_ERROR_ON_MSG(input_priorbox->num_dimensions() > 3, "The priorbox input tensor should be [C3, 2, N].");
45 
46  ARM_COMPUTE_RETURN_ERROR_ON_MSG(info.eta() <= 0.f || info.eta() > 1.f, "Eta should be between 0 and 1");
47 
48  const int num_priors = input_priorbox->tensor_shape()[0] / 4;
49  ARM_COMPUTE_RETURN_ERROR_ON_MSG(static_cast<size_t>((num_priors * info.num_loc_classes() * 4)) != input_loc->tensor_shape()[0], "Number of priors must match number of location predictions.");
50  ARM_COMPUTE_RETURN_ERROR_ON_MSG(static_cast<size_t>((num_priors * info.num_classes())) != input_conf->tensor_shape()[0], "Number of priors must match number of confidence predictions.");
51 
52  // Validate configured output
53  if(output->total_size() != 0)
54  {
55  const unsigned int max_size = info.keep_top_k() * (input_loc->num_dimensions() > 1 ? input_loc->dimension(1) : 1);
56  ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(output->tensor_shape(), TensorShape(7U, max_size));
57  ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input_loc, output);
58  }
59 
60  return Status{};
61 }
62 
63 /** Function used to sort pair<float, T> in descending order based on the score (first) value.
64  */
65 template <typename T>
66 bool SortScorePairDescend(const std::pair<float, T> &pair1,
67  const std::pair<float, T> &pair2)
68 {
69  return pair1.first > pair2.first;
70 }
71 
72 /** Get location predictions from input_loc.
73  *
74  * @param[in] input_loc The input location prediction.
75  * @param[in] num The number of images.
76  * @param[in] num_priors Number of predictions per class.
77  * @param[in] num_loc_classes Number of location classes. It is 1 if share_location is true,
78  * and is equal to the number of classes to predict otherwise.
79  * @param[in] share_location If true, all classes share the same location prediction.
80  * @param[out] all_location_predictions All the location predictions.
81  *
82  */
83 void retrieve_all_loc_predictions(const ITensor *input_loc, const int num,
84  const int num_priors, const int num_loc_classes,
85  const bool share_location, std::vector<LabelBBox> &all_location_predictions)
86 {
87  for(int i = 0; i < num; ++i)
88  {
89  for(int c = 0; c < num_loc_classes; ++c)
90  {
91  int label = share_location ? -1 : c;
92  if(all_location_predictions[i].find(label) == all_location_predictions[i].end())
93  {
94  all_location_predictions[i][label].resize(num_priors);
95  }
96  else
97  {
98  ARM_COMPUTE_ERROR_ON(all_location_predictions[i][label].size() != static_cast<size_t>(num_priors));
99  break;
100  }
101  }
102  }
103  for(int i = 0; i < num; ++i)
104  {
105  for(int p = 0; p < num_priors; ++p)
106  {
107  for(int c = 0; c < num_loc_classes; ++c)
108  {
109  const int label = share_location ? -1 : c;
110  const int base_ptr = i * num_priors * num_loc_classes * 4 + p * num_loc_classes * 4 + c * 4;
111  //xmin, ymin, xmax, ymax
112  all_location_predictions[i][label][p][0] = *reinterpret_cast<float *>(input_loc->ptr_to_element(Coordinates(base_ptr)));
113  all_location_predictions[i][label][p][1] = *reinterpret_cast<float *>(input_loc->ptr_to_element(Coordinates(base_ptr + 1)));
114  all_location_predictions[i][label][p][2] = *reinterpret_cast<float *>(input_loc->ptr_to_element(Coordinates(base_ptr + 2)));
115  all_location_predictions[i][label][p][3] = *reinterpret_cast<float *>(input_loc->ptr_to_element(Coordinates(base_ptr + 3)));
116  }
117  }
118  }
119 }
120 
121 /** Get confidence predictions from input_conf.
122  *
123  * @param[in] input_conf The input confidence prediction.
124  * @param[in] num The number of images.
125  * @param[in] num_priors Number of predictions per class.
126  * @param[in] num_classes Number of classes, i.e. the number of confidence
127  * predictions per prior.
128  * @param[out] all_confidence_scores All the confidence scores.
129  *
130  */
131 void retrieve_all_conf_scores(const ITensor *input_conf, const int num,
132  const int num_priors, const int num_classes,
133  std::vector<std::map<int, std::vector<float>>> &all_confidence_scores)
134 {
135  std::vector<float> tmp_buffer;
136  tmp_buffer.resize(num * num_priors * num_classes);
137  for(int i = 0; i < num; ++i)
138  {
139  for(int c = 0; c < num_classes; ++c)
140  {
141  for(int p = 0; p < num_priors; ++p)
142  {
143  tmp_buffer[i * num_classes * num_priors + c * num_priors + p] =
144  *reinterpret_cast<float *>(input_conf->ptr_to_element(Coordinates(i * num_classes * num_priors + p * num_classes + c)));
145  }
146  }
147  }
148  for(int i = 0; i < num; ++i)
149  {
150  for(int c = 0; c < num_classes; ++c)
151  {
152  all_confidence_scores[i][c].resize(num_priors);
153  all_confidence_scores[i][c].assign(&tmp_buffer[i * num_classes * num_priors + c * num_priors],
154  &tmp_buffer[i * num_classes * num_priors + c * num_priors + num_priors]);
155  }
156  }
157 }
158 
159 /** Get prior boxes from input_priorbox.
160  *
161  * @param[in] input_priorbox The input prior boxes.
162  * @param[in] num_priors Number of priors.
163  * @param[out] all_prior_bboxes All the prior bounding boxes, stored as
164  * [xmin, ymin, xmax, ymax].
165  * @param[out] all_prior_variances The variances associated with each prior
166  * bounding box.
167  *
168  */
169 void retrieve_all_priorbox(const ITensor *input_priorbox,
170  const int num_priors,
171  std::vector<BBox> &all_prior_bboxes,
172  std::vector<std::array<float, 4>> &all_prior_variances)
173 {
174  for(int i = 0; i < num_priors; ++i)
175  {
176  all_prior_bboxes[i] =
177  {
178  {
179  *reinterpret_cast<float *>(input_priorbox->ptr_to_element(Coordinates(i * 4))),
180  *reinterpret_cast<float *>(input_priorbox->ptr_to_element(Coordinates(i * 4 + 1))),
181  *reinterpret_cast<float *>(input_priorbox->ptr_to_element(Coordinates(i * 4 + 2))),
182  *reinterpret_cast<float *>(input_priorbox->ptr_to_element(Coordinates(i * 4 + 3)))
183  }
184  };
185  }
186 
187  std::array<float, 4> var({ { 0, 0, 0, 0 } });
188  for(int i = 0; i < num_priors; ++i)
189  {
190  for(int j = 0; j < 4; ++j)
191  {
192  var[j] = *reinterpret_cast<float *>(input_priorbox->ptr_to_element(Coordinates((num_priors + i) * 4 + j)));
193  }
194  all_prior_variances[i] = var;
195  }
196 }
197 
198 /** Decode a bbox according to a prior bbox.
199  *
200  * @param[in] prior_bbox The input prior bounding boxes.
201  * @param[in] prior_variance The corresponding input variance.
202  * @param[in] code_type The detection output code type used to decode the results.
203  * @param[in] variance_encoded_in_target If true, the variance is encoded in target.
204  * @param[in] clip_bbox If true, the results should be between 0.f and 1.f.
205  * @param[in] bbox The input bbox to decode
206  * @param[out] decode_bbox The decoded bboxes.
207  *
208  */
209 void DecodeBBox(const BBox &prior_bbox, const std::array<float, 4> &prior_variance,
210  const DetectionOutputLayerCodeType code_type, const bool variance_encoded_in_target,
211  const bool clip_bbox, const BBox &bbox, BBox &decode_bbox)
212 {
213  // If the variance is encoded in the target, we simply add the offset predictions;
214  // otherwise we scale the offsets by the prior variances (a small worked example follows this function).
215  switch(code_type)
216  {
217  case DetectionOutputLayerCodeType::CORNER:
218  {
219  decode_bbox[0] = prior_bbox[0] + (variance_encoded_in_target ? bbox[0] : prior_variance[0] * bbox[0]);
220  decode_bbox[1] = prior_bbox[1] + (variance_encoded_in_target ? bbox[1] : prior_variance[1] * bbox[1]);
221  decode_bbox[2] = prior_bbox[2] + (variance_encoded_in_target ? bbox[2] : prior_variance[2] * bbox[2]);
222  decode_bbox[3] = prior_bbox[3] + (variance_encoded_in_target ? bbox[3] : prior_variance[3] * bbox[3]);
223 
224  break;
225  }
226  case DetectionOutputLayerCodeType::CENTER_SIZE:
227  {
228  const float prior_width = prior_bbox[2] - prior_bbox[0];
229  const float prior_height = prior_bbox[3] - prior_bbox[1];
230 
231  // Check if the prior width and height are right
232  ARM_COMPUTE_ERROR_ON(prior_width <= 0.f);
233  ARM_COMPUTE_ERROR_ON(prior_height <= 0.f);
234 
235  const float prior_center_x = (prior_bbox[0] + prior_bbox[2]) / 2.;
236  const float prior_center_y = (prior_bbox[1] + prior_bbox[3]) / 2.;
237 
238  const float decode_bbox_center_x = (variance_encoded_in_target ? bbox[0] : prior_variance[0] * bbox[0]) * prior_width + prior_center_x;
239  const float decode_bbox_center_y = (variance_encoded_in_target ? bbox[1] : prior_variance[1] * bbox[1]) * prior_height + prior_center_y;
240  const float decode_bbox_width = (variance_encoded_in_target ? std::exp(bbox[2]) : std::exp(prior_variance[2] * bbox[2])) * prior_width;
241  const float decode_bbox_height = (variance_encoded_in_target ? std::exp(bbox[3]) : std::exp(prior_variance[3] * bbox[3])) * prior_height;
242 
243  decode_bbox[0] = (decode_bbox_center_x - decode_bbox_width / 2.f);
244  decode_bbox[1] = (decode_bbox_center_y - decode_bbox_height / 2.f);
245  decode_bbox[2] = (decode_bbox_center_x + decode_bbox_width / 2.f);
246  decode_bbox[3] = (decode_bbox_center_y + decode_bbox_height / 2.f);
247 
248  break;
249  }
250  case DetectionOutputLayerCodeType::CORNER_SIZE:
251  {
252  const float prior_width = prior_bbox[2] - prior_bbox[0];
253  const float prior_height = prior_bbox[3] - prior_bbox[1];
254 
255  // Check if the prior width and height are greater than 0
256  ARM_COMPUTE_ERROR_ON(prior_width <= 0.f);
257  ARM_COMPUTE_ERROR_ON(prior_height <= 0.f);
258 
259  decode_bbox[0] = prior_bbox[0] + (variance_encoded_in_target ? bbox[0] : prior_variance[0] * bbox[0]) * prior_width;
260  decode_bbox[1] = prior_bbox[1] + (variance_encoded_in_target ? bbox[1] : prior_variance[1] * bbox[1]) * prior_height;
261  decode_bbox[2] = prior_bbox[2] + (variance_encoded_in_target ? bbox[2] : prior_variance[2] * bbox[2]) * prior_width;
262  decode_bbox[3] = prior_bbox[3] + (variance_encoded_in_target ? bbox[3] : prior_variance[3] * bbox[3]) * prior_height;
263 
264  break;
265  }
266  default:
267  ARM_COMPUTE_ERROR("Unsupported Detection Output Code Type.");
268  }
269 
270  if(clip_bbox)
271  {
272  for(auto &d_bbox : decode_bbox)
273  {
274  d_bbox = utility::clamp(d_bbox, 0.f, 1.f);
275  }
276  }
277 }
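As a quick illustration of the CENTER_SIZE branch above, the following standalone sketch decodes one hypothetical prediction against one hypothetical prior box, with the variance not encoded in the target. The numbers are made up for illustration and are not taken from the library.

// Standalone sketch of the CENTER_SIZE decoding above (hypothetical values).
#include <array>
#include <cmath>
#include <cstdio>

int main()
{
    const std::array<float, 4> prior_bbox{ 0.1f, 0.1f, 0.3f, 0.5f };      // xmin, ymin, xmax, ymax
    const std::array<float, 4> prior_variance{ 0.1f, 0.1f, 0.2f, 0.2f };  // per-coordinate variances
    const std::array<float, 4> bbox{ 0.5f, -0.2f, 0.3f, 0.1f };           // encoded prediction

    const float prior_width    = prior_bbox[2] - prior_bbox[0]; // 0.2
    const float prior_height   = prior_bbox[3] - prior_bbox[1]; // 0.4
    const float prior_center_x = (prior_bbox[0] + prior_bbox[2]) / 2.f;
    const float prior_center_y = (prior_bbox[1] + prior_bbox[3]) / 2.f;

    // Variance not encoded in target: scale the predicted offsets by the prior variances.
    const float center_x = prior_variance[0] * bbox[0] * prior_width + prior_center_x;
    const float center_y = prior_variance[1] * bbox[1] * prior_height + prior_center_y;
    const float width    = std::exp(prior_variance[2] * bbox[2]) * prior_width;
    const float height   = std::exp(prior_variance[3] * bbox[3]) * prior_height;

    // Convert back to corner form, as DecodeBBox does.
    std::printf("decoded: [%f, %f, %f, %f]\n",
                center_x - width / 2.f, center_y - height / 2.f,
                center_x + width / 2.f, center_y + height / 2.f);
    return 0;
}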
278 
279 /** Do non maximum suppression given bboxes and scores.
280  *
281  * @param[in] bboxes The input bounding boxes.
282  * @param[in] scores The corresponding input confidence.
283  * @param[in] score_threshold The threshold used to filter detection results.
284  * @param[in] nms_threshold The threshold used in non maximum suppression.
285  * @param[in] eta Adaptation rate for nms threshold.
286  * @param[in] top_k If not -1, keep at most top_k picked indices.
287  * @param[out] indices The kept indices of bboxes after nms.
288  *
289  */
290 void ApplyNMSFast(const std::vector<BBox> &bboxes,
291  const std::vector<float> &scores, const float score_threshold,
292  const float nms_threshold, const float eta, const int top_k,
293  std::vector<int> &indices)
294 {
295  ARM_COMPUTE_ERROR_ON_MSG(bboxes.size() != scores.size(), "bboxes and scores have different size.");
296 
297  // Get top_k scores (with corresponding indices).
298  std::list<std::pair<float, int>> score_index_vec;
299 
300  // Generate index score pairs.
301  for(size_t i = 0; i < scores.size(); ++i)
302  {
303  if(scores[i] > score_threshold)
304  {
305  score_index_vec.emplace_back(std::make_pair(scores[i], i));
306  }
307  }
308 
309  // Sort the score pair according to the scores in descending order
310  score_index_vec.sort(SortScorePairDescend<int>);
311 
312  // Keep top_k scores if needed.
313  const int score_index_vec_size = score_index_vec.size();
314  if(top_k > -1 && top_k < score_index_vec_size)
315  {
316  score_index_vec.resize(top_k);
317  }
318 
319  // Do nms.
320  float adaptive_threshold = nms_threshold;
321  indices.clear();
322 
323  while(!score_index_vec.empty())
324  {
325  const int idx = score_index_vec.front().second;
326  bool keep = true;
327  for(int kept_idx : indices)
328  {
329  if(keep)
330  {
331  // Compute the Jaccard overlap (intersection over union, IoU) between two bboxes.
332  BBox intersect_bbox = std::array<float, 4>({ 0, 0, 0, 0 });
333  if(bboxes[kept_idx][0] > bboxes[idx][2] || bboxes[kept_idx][2] < bboxes[idx][0] || bboxes[kept_idx][1] > bboxes[idx][3] || bboxes[kept_idx][3] < bboxes[idx][1])
334  {
335  intersect_bbox = std::array<float, 4>({ { 0, 0, 0, 0 } });
336  }
337  else
338  {
339  intersect_bbox = std::array<float, 4>({ {
340  std::max(bboxes[idx][0], bboxes[kept_idx][0]),
341  std::max(bboxes[idx][1], bboxes[kept_idx][1]),
342  std::min(bboxes[idx][2], bboxes[kept_idx][2]),
343  std::min(bboxes[idx][3], bboxes[kept_idx][3])
344  }
345  });
346  }
347 
348  float intersect_width = intersect_bbox[2] - intersect_bbox[0];
349  float intersect_height = intersect_bbox[3] - intersect_bbox[1];
350 
351  float overlap = 0.f;
352  if(intersect_width > 0 && intersect_height > 0)
353  {
354  float intersect_size = intersect_width * intersect_height;
355  float bbox1_size = (bboxes[idx][2] < bboxes[idx][0]
356  || bboxes[idx][3] < bboxes[idx][1]) ?
357  0.f :
358  (bboxes[idx][2] - bboxes[idx][0]) * (bboxes[idx][3] - bboxes[idx][1]); //BBoxSize(bboxes[idx]);
359  float bbox2_size = (bboxes[kept_idx][2] < bboxes[kept_idx][0]
360  || bboxes[kept_idx][3] < bboxes[kept_idx][1]) ?
361  0.f :
362  (bboxes[kept_idx][2] - bboxes[kept_idx][0]) * (bboxes[kept_idx][3] - bboxes[kept_idx][1]); // BBoxSize(bboxes[kept_idx]);
363  overlap = intersect_size / (bbox1_size + bbox2_size - intersect_size);
364  }
365  keep = (overlap <= adaptive_threshold);
366  }
367  else
368  {
369  break;
370  }
371  }
372  if(keep)
373  {
374  indices.push_back(idx);
375  }
376  score_index_vec.erase(score_index_vec.begin());
377  if(keep && eta < 1.f && adaptive_threshold > 0.5f)
378  {
379  adaptive_threshold *= eta;
380  }
381  }
382 }
383 } // namespace
384 
385 CPPDetectionOutputLayer::CPPDetectionOutputLayer()
386  : _input_loc(nullptr), _input_conf(nullptr), _input_priorbox(nullptr), _output(nullptr), _info(), _num_priors(), _num(), _all_location_predictions(), _all_confidence_scores(), _all_prior_bboxes(),
387  _all_prior_variances(), _all_decode_bboxes(), _all_indices()
388 {
389 }
390 
391 void CPPDetectionOutputLayer::configure(const ITensor *input_loc, const ITensor *input_conf, const ITensor *input_priorbox, ITensor *output, DetectionOutputLayerInfo info)
392 {
393  ARM_COMPUTE_ERROR_ON_NULLPTR(input_loc, input_conf, input_priorbox, output);
394  // Output auto initialization if not yet initialized
395  // Since the number of bboxes to keep is unknown before nms, the shape is set to the maximum.
396  // The maximum is keep_top_k * input_loc_size[1] (see the worked example after this function).
397  // Each row stores 7 values: [image_id, label, confidence, xmin, ymin, xmax, ymax]
398  const unsigned int max_size = info.keep_top_k() * (input_loc->info()->num_dimensions() > 1 ? input_loc->info()->dimension(1) : 1);
399  auto_init_if_empty(*output->info(), input_loc->info()->clone()->set_tensor_shape(TensorShape(7U, max_size)));
400 
401  // Perform validation step
402  ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input_loc->info(), input_conf->info(), input_priorbox->info(), output->info(), info));
403 
404  _input_loc = input_loc;
405  _input_conf = input_conf;
406  _input_priorbox = input_priorbox;
407  _output = output;
408  _info = info;
409  _num_priors = input_priorbox->info()->dimension(0) / 4;
410  _num = (_input_loc->info()->num_dimensions() > 1 ? _input_loc->info()->dimension(1) : 1);
411 
412  _all_location_predictions.resize(_num);
413  _all_confidence_scores.resize(_num);
414  _all_prior_bboxes.resize(_num_priors);
415  _all_prior_variances.resize(_num_priors);
416  _all_decode_bboxes.resize(_num);
417 
418  for(int i = 0; i < _num; ++i)
419  {
420  for(int c = 0; c < _info.num_loc_classes(); ++c)
421  {
422  const int label = _info.share_location() ? -1 : c;
423  if(label == _info.background_label_id())
424  {
425  // Ignore background class.
426  continue;
427  }
428  _all_decode_bboxes[i][label].resize(_num_priors);
429  }
430  }
431  _all_indices.resize(_num);
432 
433  Coordinates coord;
434  coord.set_num_dimensions(output->info()->num_dimensions());
435  output->info()->set_valid_region(ValidRegion(coord, output->info()->tensor_shape()));
436 }
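To make the sizing comment in configure() concrete: with hypothetical values keep_top_k = 200 and a location input whose second dimension (the batch) is 2, the output is auto-initialized to TensorShape(7U, 400), i.e. room for up to 400 rows of [image_id, label, confidence, xmin, ymin, xmax, ymax]. A minimal sketch of that arithmetic:

// Minimal sketch of the maximum output size used by configure() (hypothetical values).
#include <cstdio>

int main()
{
    const unsigned int keep_top_k = 200; // hypothetical DetectionOutputLayerInfo::keep_top_k()
    const unsigned int batch      = 2;   // hypothetical input_loc->info()->dimension(1)
    const unsigned int max_size   = keep_top_k * batch;
    std::printf("output auto-initialized to TensorShape(7U, %u)\n", max_size); // 7 values per detection
    return 0;
}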
437 
438 Status CPPDetectionOutputLayer::validate(const ITensorInfo *input_loc, const ITensorInfo *input_conf, const ITensorInfo *input_priorbox, const ITensorInfo *output, DetectionOutputLayerInfo info)
439 {
440  ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input_loc, input_conf, input_priorbox, output, info));
441  return Status{};
442 }
443 
444 void CPPDetectionOutputLayer::run()
445 {
446  // Retrieve all location predictions.
447  retrieve_all_loc_predictions(_input_loc, _num, _num_priors, _info.num_loc_classes(), _info.share_location(), _all_location_predictions);
448 
449  // Retrieve all confidences.
450  retrieve_all_conf_scores(_input_conf, _num, _num_priors, _info.num_classes(), _all_confidence_scores);
451 
452  // Retrieve all prior bboxes.
453  retrieve_all_priorbox(_input_priorbox, _num_priors, _all_prior_bboxes, _all_prior_variances);
454 
455  // Decode all loc predictions to bboxes
456  const bool clip_bbox = false;
457  for(int i = 0; i < _num; ++i)
458  {
459  for(int c = 0; c < _info.num_loc_classes(); ++c)
460  {
461  const int label = _info.share_location() ? -1 : c;
462  if(label == _info.background_label_id())
463  {
464  // Ignore background class.
465  continue;
466  }
467  ARM_COMPUTE_ERROR_ON_MSG_VAR(_all_location_predictions[i].find(label) == _all_location_predictions[i].end(), "Could not find location predictions for label %d.", label);
468 
469  const std::vector<BBox> &label_loc_preds = _all_location_predictions[i].find(label)->second;
470 
471  const int num_bboxes = _all_prior_bboxes.size();
472  ARM_COMPUTE_ERROR_ON(_all_prior_variances[i].size() != 4);
473 
474  for(int j = 0; j < num_bboxes; ++j)
475  {
476  DecodeBBox(_all_prior_bboxes[j], _all_prior_variances[j], _info.code_type(), _info.variance_encoded_in_target(), clip_bbox, label_loc_preds[j], _all_decode_bboxes[i][label][j]);
477  }
478  }
479  }
480 
481  int num_kept = 0;
482 
483  for(int i = 0; i < _num; ++i)
484  {
485  const LabelBBox &decode_bboxes = _all_decode_bboxes[i];
486  const std::map<int, std::vector<float>> &conf_scores = _all_confidence_scores[i];
487 
488  std::map<int, std::vector<int>> indices;
489  int num_det = 0;
490  for(int c = 0; c < _info.num_classes(); ++c)
491  {
492  if(c == _info.background_label_id())
493  {
494  // Ignore background class
495  continue;
496  }
497  const int label = _info.share_location() ? -1 : c;
498  if(conf_scores.find(c) == conf_scores.end() || decode_bboxes.find(label) == decode_bboxes.end())
499  {
500  ARM_COMPUTE_ERROR_VAR("Could not find predictions for label %d.", label);
501  }
502  const std::vector<float> &scores = conf_scores.find(c)->second;
503  const std::vector<BBox> &bboxes = decode_bboxes.find(label)->second;
504 
505  ApplyNMSFast(bboxes, scores, _info.confidence_threshold(), _info.nms_threshold(), _info.eta(), _info.top_k(), indices[c]);
506 
507  num_det += indices[c].size();
508  }
509 
510  int num_to_add = 0;
511  if(_info.keep_top_k() > -1 && num_det > _info.keep_top_k())
512  {
513  std::vector<std::pair<float, std::pair<int, int>>> score_index_pairs;
514  for(auto const &it : indices)
515  {
516  const int label = it.first;
517  const std::vector<int> &label_indices = it.second;
518 
519  if(conf_scores.find(label) == conf_scores.end())
520  {
521  ARM_COMPUTE_ERROR_VAR("Could not find predictions for label %d.", label);
522  }
523 
524  const std::vector<float> &scores = conf_scores.find(label)->second;
525  for(auto idx : label_indices)
526  {
527  ARM_COMPUTE_ERROR_ON(idx > static_cast<int>(scores.size()));
528  score_index_pairs.emplace_back(std::make_pair(scores[idx], std::make_pair(label, idx)));
529  }
530  }
531 
532  // Keep top k results per image.
533  std::sort(score_index_pairs.begin(), score_index_pairs.end(), SortScorePairDescend<std::pair<int, int>>);
534  score_index_pairs.resize(_info.keep_top_k());
535 
536  // Store the new indices.
537 
538  std::map<int, std::vector<int>> new_indices;
539  for(auto score_index_pair : score_index_pairs)
540  {
541  int label = score_index_pair.second.first;
542  int idx = score_index_pair.second.second;
543  new_indices[label].push_back(idx);
544  }
545  _all_indices[i] = new_indices;
546  num_to_add = _info.keep_top_k();
547  }
548  else
549  {
550  _all_indices[i] = indices;
551  num_to_add = num_det;
552  }
553  num_kept += num_to_add;
554  }
555 
556  // Update the valid region of the output to mark the exact number of detections
557  _output->info()->set_valid_region(ValidRegion(Coordinates(0, 0), TensorShape(7, num_kept)));
558 
559  int count = 0;
560  for(int i = 0; i < _num; ++i)
561  {
562  const std::map<int, std::vector<float>> &conf_scores = _all_confidence_scores[i];
563  const LabelBBox &decode_bboxes = _all_decode_bboxes[i];
564  for(auto &it : _all_indices[i])
565  {
566  const int label = it.first;
567  const std::vector<float> &scores = conf_scores.find(label)->second;
568  const int loc_label = _info.share_location() ? -1 : label;
569  if(conf_scores.find(label) == conf_scores.end() || decode_bboxes.find(loc_label) == decode_bboxes.end())
570  {
571  // Either if there are no confidence predictions
572  // or there are no location predictions for current label.
573  ARM_COMPUTE_ERROR_VAR("Could not find predictions for the label %d.", label);
574  }
575  const std::vector<BBox> &bboxes = decode_bboxes.find(loc_label)->second;
576  const std::vector<int> &indices = it.second;
577 
578  for(auto idx : indices)
579  {
580  *(reinterpret_cast<float *>(_output->ptr_to_element(Coordinates(count * 7)))) = i;
581  *(reinterpret_cast<float *>(_output->ptr_to_element(Coordinates(count * 7 + 1)))) = label;
582  *(reinterpret_cast<float *>(_output->ptr_to_element(Coordinates(count * 7 + 2)))) = scores[idx];
583  *(reinterpret_cast<float *>(_output->ptr_to_element(Coordinates(count * 7 + 3)))) = bboxes[idx][0];
584  *(reinterpret_cast<float *>(_output->ptr_to_element(Coordinates(count * 7 + 4)))) = bboxes[idx][1];
585  *(reinterpret_cast<float *>(_output->ptr_to_element(Coordinates(count * 7 + 5)))) = bboxes[idx][2];
586  *(reinterpret_cast<float *>(_output->ptr_to_element(Coordinates(count * 7 + 6)))) = bboxes[idx][3];
587 
588  ++count;
589  }
590  }
591  }
592 }
593 } // namespace arm_compute
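A minimal usage sketch of this function (not taken from the library's examples): F32 inputs shaped as required by validate_arguments() are configured, the output is auto-initialized, and run() fills it with one 7-value row per detection. The tensor sizes and the DetectionOutputLayerInfo constructor arguments below are assumptions for illustration and should be checked against arm_compute/core/Types.h.

// Usage sketch (hypothetical shapes and layer parameters).
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/CPP/functions/CPPDetectionOutputLayer.h"
#include "arm_compute/runtime/Tensor.h"

using namespace arm_compute;

int main()
{
    const unsigned int num_priors  = 8732; // hypothetical: SSD300-like prior count
    const unsigned int num_classes = 21;   // hypothetical: 20 classes + background

    // Shapes follow validate_arguments(): loc is [C1, N], conf is [C2, N], priorbox is [C3, 2, N].
    Tensor loc, conf, priorbox, output;
    loc.allocator()->init(TensorInfo(TensorShape(num_priors * 4, 1U), 1, DataType::F32));
    conf.allocator()->init(TensorInfo(TensorShape(num_priors * num_classes, 1U), 1, DataType::F32));
    priorbox.allocator()->init(TensorInfo(TensorShape(num_priors * 4, 2U, 1U), 1, DataType::F32));

    // Constructor argument order assumed: num_classes, share_location, code_type, keep_top_k, nms_threshold.
    const DetectionOutputLayerInfo info(num_classes, true, DetectionOutputLayerCodeType::CENTER_SIZE, 200, 0.45f);

    CPPDetectionOutputLayer detection;
    detection.configure(&loc, &conf, &priorbox, &output, info); // output is auto-initialized here

    loc.allocator()->allocate();
    conf.allocator()->allocate();
    priorbox.allocator()->allocate();
    output.allocator()->allocate();

    // ... fill loc, conf and priorbox with the network's predictions ...

    detection.run(); // each output row is [image_id, label, confidence, xmin, ymin, xmax, ymax]
    return 0;
}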