Compute Library 22.11 - GraphUtils.cpp
/*
 * Copyright (c) 2017-2021 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "utils/GraphUtils.h"

#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/SubTensor.h"

#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#include "utils/ImageLoader.h"
#pragma GCC diagnostic pop
#include "utils/Utils.h"

#include <inttypes.h>
#include <iomanip>
#include <limits>

using namespace arm_compute::graph_utils;

namespace
{
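// Compute the permuted tensor shape and the permutation vector needed when the
// target tensor layout differs from the NCHW layout produced by the image loaders.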
std::pair<arm_compute::TensorShape, arm_compute::PermutationVector> compute_permutation_parameters(const arm_compute::TensorShape &shape,
                                                                                                   arm_compute::DataLayout         data_layout)
{
    // Set permutation parameters if needed
    arm_compute::TensorShape       permuted_shape = shape;
    arm_compute::PermutationVector perm;
    // Permute only if num_dimensions is greater than 2
    if(shape.num_dimensions() > 2)
    {
        perm = (data_layout == arm_compute::DataLayout::NHWC) ? arm_compute::PermutationVector(2U, 0U, 1U) : arm_compute::PermutationVector(1U, 2U, 0U);

        arm_compute::PermutationVector perm_shape = (data_layout == arm_compute::DataLayout::NCHW) ? arm_compute::PermutationVector(2U, 0U, 1U) : arm_compute::PermutationVector(1U, 2U, 0U);
        arm_compute::permute(permuted_shape, perm_shape);
    }

    return std::make_pair(permuted_shape, perm);
}
} // namespace

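// TFPreproccessor rescales pixel values from [0, 255] to [_min_range, _max_range],
// the input normalisation convention used by many TensorFlow models.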
TFPreproccessor::TFPreproccessor(float min_range, float max_range)
    : _min_range(min_range), _max_range(max_range)
{
}

void TFPreproccessor::preprocess(ITensor &tensor)
{
    if(tensor.info()->data_type() == DataType::F32)
    {
        preprocess_typed<float>(tensor);
    }
    else if(tensor.info()->data_type() == DataType::F16)
    {
        preprocess_typed<half>(tensor);
    }
    else
    {
        ARM_COMPUTE_ERROR("NOT SUPPORTED!");
    }
}

template <typename T>
void TFPreproccessor::preprocess_typed(ITensor &tensor)
{
    Window window;
    window.use_tensor_dimensions(tensor.info()->tensor_shape());

    const float range = _max_range - _min_range;
    execute_window_loop(window, [&](const Coordinates &id)
    {
        const T value = *reinterpret_cast<T *>(tensor.ptr_to_element(id));
        float   res   = value / 255.f;            // Normalize to [0, 1]
        res           = res * range + _min_range; // Map to [min_range, max_range]
        *reinterpret_cast<T *>(tensor.ptr_to_element(id)) = res;
    });
}

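// CaffePreproccessor subtracts a per-channel mean and applies an optional scale factor;
// the mean values are swapped from RGB to BGR order when the input image is loaded as BGR.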
CaffePreproccessor::CaffePreproccessor(std::array<float, 3> mean, bool bgr, float scale)
    : _mean(mean), _bgr(bgr), _scale(scale)
{
    if(_bgr)
    {
        std::swap(_mean[0], _mean[2]);
    }
}

void CaffePreproccessor::preprocess(ITensor &tensor)
{
    if(tensor.info()->data_type() == DataType::F32)
    {
        preprocess_typed<float>(tensor);
    }
    else if(tensor.info()->data_type() == DataType::F16)
    {
        preprocess_typed<half>(tensor);
    }
    else
    {
        ARM_COMPUTE_ERROR("NOT SUPPORTED!");
    }
}

template <typename T>
void CaffePreproccessor::preprocess_typed(ITensor &tensor)
{
    Window window;
    window.use_tensor_dimensions(tensor.info()->tensor_shape());
    const int channel_idx = get_data_layout_dimension_index(tensor.info()->data_layout(), DataLayoutDimension::CHANNEL);

    execute_window_loop(window, [&](const Coordinates &id)
    {
        const T value = *reinterpret_cast<T *>(tensor.ptr_to_element(id)) - T(_mean[id[channel_idx]]);
        *reinterpret_cast<T *>(tensor.ptr_to_element(id)) = value * T(_scale);
    });
}

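// PPMWriter dumps every tensor it is given to "<name><index>.ppm", stopping after
// 'maximum' tensors have been written (0 means no limit).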
PPMWriter::PPMWriter(std::string name, unsigned int maximum)
    : _name(std::move(name)), _iterator(0), _maximum(maximum)
{
}

bool PPMWriter::access_tensor(ITensor &tensor)
{
    std::stringstream ss;
    ss << _name << _iterator << ".ppm";

    arm_compute::utils::save_to_ppm(tensor, ss.str());

    _iterator++;
    if(_maximum == 0)
    {
        return true;
    }
    return _iterator < _maximum;
}

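// DummyAccessor performs no I/O at all; it is useful for benchmarking graph execution
// without paying the cost of reading or writing real data.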
DummyAccessor::DummyAccessor(unsigned int maximum)
    : _iterator(0), _maximum(maximum)
{
}

bool DummyAccessor::access_tensor_data()
{
    return false;
}

bool DummyAccessor::access_tensor(ITensor &tensor)
{
    ARM_COMPUTE_UNUSED(tensor);
    bool ret = _maximum == 0 || _iterator < _maximum;
    if(_iterator == _maximum)
    {
        _iterator = 0;
    }
    else
    {
        _iterator++;
    }
    return ret;
}

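// NumPyAccessor loads a reference tensor from a .npy file at construction time and, on each
// access, reports how closely the produced tensor matches it within a per-type tolerance.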
NumPyAccessor::NumPyAccessor(std::string npy_path, TensorShape shape, DataType data_type, DataLayout data_layout, std::ostream &output_stream)
    : _npy_tensor(), _filename(std::move(npy_path)), _output_stream(output_stream)
{
    NumPyBinLoader loader(_filename, data_layout);

    TensorInfo info(shape, 1, data_type);
    info.set_data_layout(data_layout);

    _npy_tensor.allocator()->init(info);
    _npy_tensor.allocator()->allocate();

    loader.access_tensor(_npy_tensor);
}

template <typename T>
void NumPyAccessor::access_numpy_tensor(ITensor &tensor, T tolerance)
{
    const int num_elements          = tensor.info()->tensor_shape().total_size();
    int       num_mismatches        = utils::compare_tensor<T>(tensor, _npy_tensor, tolerance);
    float     percentage_mismatches = static_cast<float>(num_mismatches) / num_elements;

    _output_stream << "Results: " << 100.f - (percentage_mismatches * 100) << " % matches with the provided output[" << _filename << "]." << std::endl;
    _output_stream << "         " << num_elements - num_mismatches << " out of " << num_elements << " matches with the provided output[" << _filename << "]." << std::endl
                   << std::endl;
}

bool NumPyAccessor::access_tensor(ITensor &tensor)
{
    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&tensor, 1, DataType::QASYMM8, DataType::F32);
    ARM_COMPUTE_ERROR_ON(_npy_tensor.info()->dimension(0) != tensor.info()->dimension(0));

    switch(tensor.info()->data_type())
    {
        case DataType::QASYMM8:
            access_numpy_tensor<qasymm8_t>(tensor, 0);
            break;
        case DataType::F32:
            access_numpy_tensor<float>(tensor, 0.0001f);
            break;
        default:
            ARM_COMPUTE_ERROR("NOT SUPPORTED!");
    }

    return false;
}

#ifdef ARM_COMPUTE_ASSERTS_ENABLED
PrintAccessor::PrintAccessor(std::ostream &output_stream, IOFormatInfo io_fmt)
    : _output_stream(output_stream), _io_fmt(io_fmt)
{
}

bool PrintAccessor::access_tensor(ITensor &tensor)
{
    tensor.print(_output_stream, _io_fmt);
    return false;
}
#endif /* ARM_COMPUTE_ASSERTS_ENABLED */

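// SaveNumPyAccessor serialises the accessed tensor to a .npy file, optionally in
// Fortran (column-major) order.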
SaveNumPyAccessor::SaveNumPyAccessor(std::string npy_name, const bool is_fortran)
    : _npy_name(std::move(npy_name)), _is_fortran(is_fortran)
{
}

bool SaveNumPyAccessor::access_tensor(ITensor &tensor)
{
    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&tensor, 1, DataType::F32);

    utils::save_to_npy(tensor, _npy_name, _is_fortran);

    return false;
}

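// ImageAccessor fills the input tensor once from an image file (PPM or JPEG), permuting the
// expected shape when the tensor layout is not NCHW, and runs the optional preprocessor on it.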
ImageAccessor::ImageAccessor(std::string filename, bool bgr, std::unique_ptr<IPreprocessor> preprocessor)
    : _already_loaded(false), _filename(std::move(filename)), _bgr(bgr), _preprocessor(std::move(preprocessor))
{
}

bool ImageAccessor::access_tensor(ITensor &tensor)
{
    if(!_already_loaded)
    {
        auto image_loader = utils::ImageLoaderFactory::create(_filename);
        ARM_COMPUTE_EXIT_ON_MSG(image_loader == nullptr, "Unsupported image type");

        // Open image file
        image_loader->open(_filename);

        // Get permuted shape and permutation parameters
        TensorShape       permuted_shape = tensor.info()->tensor_shape();
        PermutationVector perm;
        if(tensor.info()->data_layout() != DataLayout::NCHW)
        {
            std::tie(permuted_shape, perm) = compute_permutation_parameters(tensor.info()->tensor_shape(), tensor.info()->data_layout());
        }

#ifdef __arm__
        ARM_COMPUTE_EXIT_ON_MSG_VAR(image_loader->width() != permuted_shape.x() || image_loader->height() != permuted_shape.y(),
                                    "Failed to load image file: dimensions [%d,%d] not correct, expected [%" PRIu32 ",%" PRIu32 "].",
                                    image_loader->width(), image_loader->height(), permuted_shape.x(), permuted_shape.y());
#else  // __arm__
        ARM_COMPUTE_EXIT_ON_MSG_VAR(image_loader->width() != permuted_shape.x() || image_loader->height() != permuted_shape.y(),
                                    "Failed to load image file: dimensions [%d,%d] not correct, expected [%" PRIu64 ",%" PRIu64 "].",
                                    image_loader->width(), image_loader->height(),
                                    static_cast<uint64_t>(permuted_shape.x()), static_cast<uint64_t>(permuted_shape.y()));
#endif // __arm__

        // Fill the tensor with the image content (as BGR if requested)
        image_loader->fill_planar_tensor(tensor, _bgr);

        // Preprocess tensor
        if(_preprocessor)
        {
            _preprocessor->preprocess(tensor);
        }
    }

    _already_loaded = !_already_loaded;
    return _already_loaded;
}

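// ValidationInputAccessor streams the images listed in a validation file (one name per line,
// restricted to the [start, end] range) into the input tensor, applying the optional preprocessor.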
ValidationInputAccessor::ValidationInputAccessor(const std::string             &image_list,
                                                 std::string                    images_path,
                                                 std::unique_ptr<IPreprocessor> preprocessor,
                                                 bool                           bgr,
                                                 unsigned int                   start,
                                                 unsigned int                   end,
                                                 std::ostream                  &output_stream)
    : _path(std::move(images_path)), _images(), _preprocessor(std::move(preprocessor)), _bgr(bgr), _offset(0), _output_stream(output_stream)
{
    ARM_COMPUTE_EXIT_ON_MSG(start > end, "Invalid validation range!");

    std::ifstream ifs;
    try
    {
        ifs.exceptions(std::ifstream::badbit);
        ifs.open(image_list, std::ios::in | std::ios::binary);

        // Parse image names
        unsigned int counter = 0;
        for(std::string line; !std::getline(ifs, line).fail() && counter <= end; ++counter)
        {
            // Add image to process if within range
            if(counter >= start)
            {
                std::stringstream linestream(line);
                std::string       image_name;

                linestream >> image_name;
                _images.emplace_back(std::move(image_name));
            }
        }
    }
    catch(const std::ifstream::failure &e)
    {
        ARM_COMPUTE_ERROR_VAR("Accessing %s: %s", image_list.c_str(), e.what());
    }
}

bool ValidationInputAccessor::access_tensor(arm_compute::ITensor &tensor)
{
    bool ret = _offset < _images.size();
    if(ret)
    {
        utils::JPEGLoader jpeg;

        // Open JPEG file
        std::string image_name = _path + _images[_offset++];
        jpeg.open(image_name);
        _output_stream << "[" << _offset << "/" << _images.size() << "] Validating " << image_name << std::endl;

        // Get permuted shape and permutation parameters
        TensorShape       permuted_shape = tensor.info()->tensor_shape();
        PermutationVector perm;
        if(tensor.info()->data_layout() != DataLayout::NCHW)
        {
            std::tie(permuted_shape, perm) = compute_permutation_parameters(tensor.info()->tensor_shape(),
                                                                            tensor.info()->data_layout());
        }

#ifdef __arm__
        ARM_COMPUTE_EXIT_ON_MSG_VAR(jpeg.width() != permuted_shape.x() || jpeg.height() != permuted_shape.y(),
                                    "Failed to load image file: dimensions [%d,%d] not correct, expected [%" PRIu32 ",%" PRIu32 "].",
                                    jpeg.width(), jpeg.height(), permuted_shape.x(), permuted_shape.y());
#else  // __arm__
        ARM_COMPUTE_EXIT_ON_MSG_VAR(jpeg.width() != permuted_shape.x() || jpeg.height() != permuted_shape.y(),
                                    "Failed to load image file: dimensions [%d,%d] not correct, expected [%" PRIu64 ",%" PRIu64 "].",
                                    jpeg.width(), jpeg.height(),
                                    static_cast<uint64_t>(permuted_shape.x()), static_cast<uint64_t>(permuted_shape.y()));
#endif // __arm__

        // Fill the tensor with the JPEG content (as BGR if requested)
        jpeg.fill_planar_tensor(tensor, _bgr);

        // Preprocess tensor
        if(_preprocessor)
        {
            _preprocessor->preprocess(tensor);
        }
    }

    return ret;
}

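// ValidationOutputAccessor compares each prediction against the ground-truth labels parsed from
// the validation list and reports Top-1 / Top-5 accuracy once every sample has been processed.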
ValidationOutputAccessor::ValidationOutputAccessor(const std::string &image_list,
                                                   std::ostream      &output_stream,
                                                   unsigned int       start,
                                                   unsigned int       end)
    : _results(), _output_stream(output_stream), _offset(0), _positive_samples_top1(0), _positive_samples_top5(0)
{
    ARM_COMPUTE_EXIT_ON_MSG(start > end, "Invalid validation range!");

    std::ifstream ifs;
    try
    {
        ifs.exceptions(std::ifstream::badbit);
        ifs.open(image_list, std::ios::in | std::ios::binary);

        // Parse the correctly classified labels
        unsigned int counter = 0;
        for(std::string line; !std::getline(ifs, line).fail() && counter <= end; ++counter)
        {
            // Add label if within range
            if(counter >= start)
            {
                std::stringstream linestream(line);
                std::string       image_name;
                int               result;

                linestream >> image_name >> result;
                _results.emplace_back(result);
            }
        }
    }
    catch(const std::ifstream::failure &e)
    {
        ARM_COMPUTE_ERROR_VAR("Accessing %s: %s", image_list.c_str(), e.what());
    }
}

void ValidationOutputAccessor::reset()
{
    _offset                = 0;
    _positive_samples_top1 = 0;
    _positive_samples_top5 = 0;
}

bool ValidationOutputAccessor::access_tensor(arm_compute::ITensor &tensor)
{
    bool ret = _offset < _results.size();
    if(ret)
    {
        // Get results
        std::vector<size_t> tensor_results;
        switch(tensor.info()->data_type())
        {
            case DataType::QASYMM8:
                tensor_results = access_predictions_tensor<uint8_t>(tensor);
                break;
            case DataType::F16:
                tensor_results = access_predictions_tensor<half>(tensor);
                break;
            case DataType::F32:
                tensor_results = access_predictions_tensor<float>(tensor);
                break;
            default:
                ARM_COMPUTE_ERROR("NOT SUPPORTED!");
        }

        // Check if the tensor results are within top-n accuracy
        size_t correct_label = _results[_offset++];

        aggregate_sample(tensor_results, _positive_samples_top1, 1, correct_label);
        aggregate_sample(tensor_results, _positive_samples_top5, 5, correct_label);
    }

    // Report top-n accuracy
    if(_offset >= _results.size())
    {
        report_top_n(1, _results.size(), _positive_samples_top1);
        report_top_n(5, _results.size(), _positive_samples_top5);
    }

    return ret;
}

template <typename T>
std::vector<size_t> ValidationOutputAccessor::access_predictions_tensor(arm_compute::ITensor &tensor)
{
    // Get the predicted class
    std::vector<size_t> index;

    const auto   output_net  = reinterpret_cast<T *>(tensor.buffer() + tensor.info()->offset_first_element_in_bytes());
    const size_t num_classes = tensor.info()->dimension(0);

    index.resize(num_classes);

    // Sort results
    std::iota(std::begin(index), std::end(index), static_cast<size_t>(0));
    std::sort(std::begin(index), std::end(index),
              [&](size_t a, size_t b)
    {
        return output_net[a] > output_net[b];
    });

    return index;
}

void ValidationOutputAccessor::aggregate_sample(const std::vector<size_t> &res, size_t &positive_samples, size_t top_n, size_t correct_label)
{
    auto is_valid_label = [correct_label](size_t label)
    {
        return label == correct_label;
    };

    if(std::any_of(std::begin(res), std::begin(res) + top_n, is_valid_label))
    {
        ++positive_samples;
    }
}

void ValidationOutputAccessor::report_top_n(size_t top_n, size_t total_samples, size_t positive_samples)
{
    size_t negative_samples = total_samples - positive_samples;
    float  accuracy         = positive_samples / static_cast<float>(total_samples);

    _output_stream << "----------Top " << top_n << " accuracy ----------" << std::endl
                   << std::endl;
    _output_stream << "Positive samples : " << positive_samples << std::endl;
    _output_stream << "Negative samples : " << negative_samples << std::endl;
    _output_stream << "Accuracy : " << accuracy << std::endl;
}

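// DetectionOutputAccessor pretty-prints detection results: each detection is a 7-element tuple
// [image id, label, confidence, xmin, ymin, xmax, ymax], with the box coordinates scaled back
// to the original image dimensions.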
DetectionOutputAccessor::DetectionOutputAccessor(const std::string &labels_path, std::vector<TensorShape> &imgs_tensor_shapes, std::ostream &output_stream)
    : _labels(), _tensor_shapes(std::move(imgs_tensor_shapes)), _output_stream(output_stream)
{
    _labels.clear();

    std::ifstream ifs;

    try
    {
        ifs.exceptions(std::ifstream::badbit);
        ifs.open(labels_path, std::ios::in | std::ios::binary);

        for(std::string line; !std::getline(ifs, line).fail();)
        {
            _labels.emplace_back(line);
        }
    }
    catch(const std::ifstream::failure &e)
    {
        ARM_COMPUTE_ERROR_VAR("Accessing %s: %s", labels_path.c_str(), e.what());
    }
}

template <typename T>
void DetectionOutputAccessor::access_predictions_tensor(ITensor &tensor)
{
    const size_t num_detection = tensor.info()->valid_region().shape.y();
    const auto   output_prt    = reinterpret_cast<T *>(tensor.buffer() + tensor.info()->offset_first_element_in_bytes());

    if(num_detection > 0)
    {
        _output_stream << "---------------------- Detections ----------------------" << std::endl
                       << std::endl;

        _output_stream << std::left << std::setprecision(4) << std::setw(8) << "Image | " << std::setw(8) << "Label | " << std::setw(12) << "Confidence | "
                       << "[ xmin, ymin, xmax, ymax ]" << std::endl;

        for(size_t i = 0; i < num_detection; ++i)
        {
            auto im = static_cast<const int>(output_prt[i * 7]);
            _output_stream << std::setw(8) << im << std::setw(8)
                           << _labels[output_prt[i * 7 + 1]] << std::setw(12) << output_prt[i * 7 + 2]
                           << " [" << (output_prt[i * 7 + 3] * _tensor_shapes[im].x())
                           << ", " << (output_prt[i * 7 + 4] * _tensor_shapes[im].y())
                           << ", " << (output_prt[i * 7 + 5] * _tensor_shapes[im].x())
                           << ", " << (output_prt[i * 7 + 6] * _tensor_shapes[im].y())
                           << "]" << std::endl;
        }
    }
    else
    {
        _output_stream << "No detection found." << std::endl;
    }
}

bool DetectionOutputAccessor::access_tensor(ITensor &tensor)
{
    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&tensor, 1, DataType::F32);

    switch(tensor.info()->data_type())
    {
        case DataType::F32:
            access_predictions_tensor<float>(tensor);
            break;
        default:
            ARM_COMPUTE_ERROR("NOT SUPPORTED!");
    }

    return false;
}

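// TopNPredictionsAccessor sorts the classification scores and prints the top-N class
// probabilities together with their class ids and label strings.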
TopNPredictionsAccessor::TopNPredictionsAccessor(const std::string &labels_path, size_t top_n, std::ostream &output_stream)
    : _labels(), _output_stream(output_stream), _top_n(top_n)
{
    _labels.clear();

    std::ifstream ifs;

    try
    {
        ifs.exceptions(std::ifstream::badbit);
        ifs.open(labels_path, std::ios::in | std::ios::binary);

        for(std::string line; !std::getline(ifs, line).fail();)
        {
            _labels.emplace_back(line);
        }
    }
    catch(const std::ifstream::failure &e)
    {
        ARM_COMPUTE_ERROR_VAR("Accessing %s: %s", labels_path.c_str(), e.what());
    }
}

template <typename T>
void TopNPredictionsAccessor::access_predictions_tensor(ITensor &tensor)
{
    // Get the predicted class
    std::vector<T>      classes_prob;
    std::vector<size_t> index;

    const auto   output_net  = reinterpret_cast<T *>(tensor.buffer() + tensor.info()->offset_first_element_in_bytes());
    const size_t num_classes = tensor.info()->dimension(0);

    classes_prob.resize(num_classes);
    index.resize(num_classes);

    std::copy(output_net, output_net + num_classes, classes_prob.begin());

    // Sort results
    std::iota(std::begin(index), std::end(index), static_cast<size_t>(0));
    std::sort(std::begin(index), std::end(index),
              [&](size_t a, size_t b)
    {
        return classes_prob[a] > classes_prob[b];
    });

    _output_stream << "---------- Top " << _top_n << " predictions ----------" << std::endl
                   << std::endl;
    for(size_t i = 0; i < _top_n; ++i)
    {
        _output_stream << std::fixed << std::setprecision(4)
                       << +classes_prob[index.at(i)]
                       << " - [id = " << index.at(i) << "]"
                       << ", " << _labels[index.at(i)] << std::endl;
    }
}

bool TopNPredictionsAccessor::access_tensor(ITensor &tensor)
{
    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&tensor, 1, DataType::QASYMM8, DataType::F32);
    ARM_COMPUTE_ERROR_ON(_labels.size() != tensor.info()->dimension(0));

    switch(tensor.info()->data_type())
    {
        case DataType::QASYMM8:
            access_predictions_tensor<uint8_t>(tensor);
            break;
        case DataType::F32:
            access_predictions_tensor<float>(tensor);
            break;
        default:
            ARM_COMPUTE_ERROR("NOT SUPPORTED!");
    }

    return false;
}

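// RandomAccessor fills a tensor with values drawn from a uniform distribution between 'lower'
// and 'upper'; padded tensors and sub-tensors are written through an execution window instead
// of the raw buffer.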
RandomAccessor::RandomAccessor(PixelValue lower, PixelValue upper, std::random_device::result_type seed)
    : _lower(lower), _upper(upper), _seed(seed)
{
}

template <typename T, typename D>
void RandomAccessor::fill(ITensor &tensor, D &&distribution)
{
    std::mt19937 gen(_seed);

    if(tensor.info()->padding().empty() && (dynamic_cast<SubTensor *>(&tensor) == nullptr))
    {
        for(size_t offset = 0; offset < tensor.info()->total_size(); offset += tensor.info()->element_size())
        {
            const auto value                                 = static_cast<T>(distribution(gen));
            *reinterpret_cast<T *>(tensor.buffer() + offset) = value;
        }
    }
    else
    {
        // If the tensor has padding, access the elements through an execution window
        Window window;
        window.use_tensor_dimensions(tensor.info()->tensor_shape());

        execute_window_loop(window, [&](const Coordinates &id)
        {
            const auto value                                  = static_cast<T>(distribution(gen));
            *reinterpret_cast<T *>(tensor.ptr_to_element(id)) = value;
        });
    }
}

bool RandomAccessor::access_tensor(ITensor &tensor)
{
    switch(tensor.info()->data_type())
    {
        case DataType::QASYMM8:
        case DataType::U8:
        {
            std::uniform_int_distribution<uint8_t> distribution_u8(_lower.get<uint8_t>(), _upper.get<uint8_t>());
            fill<uint8_t>(tensor, distribution_u8);
            break;
        }
        case DataType::S8:
        {
            std::uniform_int_distribution<int8_t> distribution_s8(_lower.get<int8_t>(), _upper.get<int8_t>());
            fill<int8_t>(tensor, distribution_s8);
            break;
        }
        case DataType::U16:
        {
            std::uniform_int_distribution<uint16_t> distribution_u16(_lower.get<uint16_t>(), _upper.get<uint16_t>());
            fill<uint16_t>(tensor, distribution_u16);
            break;
        }
        case DataType::S16:
        {
            std::uniform_int_distribution<int16_t> distribution_s16(_lower.get<int16_t>(), _upper.get<int16_t>());
            fill<int16_t>(tensor, distribution_s16);
            break;
        }
        case DataType::U32:
        {
            std::uniform_int_distribution<uint32_t> distribution_u32(_lower.get<uint32_t>(), _upper.get<uint32_t>());
            fill<uint32_t>(tensor, distribution_u32);
            break;
        }
        case DataType::S32:
        {
            std::uniform_int_distribution<int32_t> distribution_s32(_lower.get<int32_t>(), _upper.get<int32_t>());
            fill<int32_t>(tensor, distribution_s32);
            break;
        }
        case DataType::U64:
        {
            std::uniform_int_distribution<uint64_t> distribution_u64(_lower.get<uint64_t>(), _upper.get<uint64_t>());
            fill<uint64_t>(tensor, distribution_u64);
            break;
        }
        case DataType::S64:
        {
            std::uniform_int_distribution<int64_t> distribution_s64(_lower.get<int64_t>(), _upper.get<int64_t>());
            fill<int64_t>(tensor, distribution_s64);
            break;
        }
        case DataType::F16:
        {
            arm_compute::utils::uniform_real_distribution_16bit<half> distribution_f16(_lower.get<float>(), _upper.get<float>());
            fill<half>(tensor, distribution_f16);
            break;
        }
        case DataType::F32:
        {
            std::uniform_real_distribution<float> distribution_f32(_lower.get<float>(), _upper.get<float>());
            fill<float>(tensor, distribution_f32);
            break;
        }
        case DataType::F64:
        {
            std::uniform_real_distribution<double> distribution_f64(_lower.get<double>(), _upper.get<double>());
            fill<double>(tensor, distribution_f64);
            break;
        }
        default:
            ARM_COMPUTE_ERROR("NOT SUPPORTED!");
    }
    return true;
}

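// NumPyBinLoader fills the tensor once from a .npy file stored with the given data layout.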
NumPyBinLoader::NumPyBinLoader(std::string filename, DataLayout file_layout)
    : _already_loaded(false), _filename(std::move(filename)), _file_layout(file_layout)
{
}

bool NumPyBinLoader::access_tensor(ITensor &tensor)
{
    if(!_already_loaded)
    {
        utils::NPYLoader loader;
        loader.open(_filename, _file_layout);
        loader.fill_tensor(tensor);
    }

    _already_loaded = !_already_loaded;
    return _already_loaded;
}
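
// Note: the graph examples typically obtain these accessors through the helper functions
// declared in utils/GraphUtils.h (e.g. get_input_accessor() and get_output_accessor()),
// which pick the appropriate accessor and preprocessor from the common graph parameters.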