Compute Library
 21.02
GraphUtils.cpp
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2017-2021 Arm Limited.
3  *
4  * SPDX-License-Identifier: MIT
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a copy
7  * of this software and associated documentation files (the "Software"), to
8  * deal in the Software without restriction, including without limitation the
9  * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10  * sell copies of the Software, and to permit persons to whom the Software is
11  * furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in all
14  * copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22  * SOFTWARE.
23  */
24 
25 #include "utils/GraphUtils.h"
26 
28 #include "arm_compute/core/Types.h"
31 
32 #pragma GCC diagnostic push
33 #pragma GCC diagnostic ignored "-Wunused-parameter"
34 #include "utils/ImageLoader.h"
35 #pragma GCC diagnostic pop
36 #include "utils/Utils.h"
37 
38 #include <inttypes.h>
39 #include <iomanip>
40 #include <limits>
41 
42 using namespace arm_compute::graph_utils;
43 
44 namespace
45 {
46 std::pair<arm_compute::TensorShape, arm_compute::PermutationVector> compute_permutation_parameters(const arm_compute::TensorShape &shape,
48 {
49  // Set permutation parameters if needed
50  arm_compute::TensorShape permuted_shape = shape;
52  // Permute only if num_dimensions greater than 2
53  if(shape.num_dimensions() > 2)
54  {
55  perm = (data_layout == arm_compute::DataLayout::NHWC) ? arm_compute::PermutationVector(2U, 0U, 1U) : arm_compute::PermutationVector(1U, 2U, 0U);
56 
58  arm_compute::permute(permuted_shape, perm_shape);
59  }
60 
61  return std::make_pair(permuted_shape, perm);
62 }
63 } // namespace
64 
/** Construct a TensorFlow-style preprocessor.
 *
 * Stores the target range bounds; preprocess() later maps pixel values from
 * [0, 255] into [min_range, max_range] (see preprocess_typed below).
 *
 * @param[in] min_range Lower bound of the target range
 * @param[in] max_range Upper bound of the target range
 */
TFPreproccessor::TFPreproccessor(float min_range, float max_range)
    : _min_range(min_range), _max_range(max_range)
{
}
70 {
71  if(tensor.info()->data_type() == DataType::F32)
72  {
73  preprocess_typed<float>(tensor);
74  }
75  else if(tensor.info()->data_type() == DataType::F16)
76  {
77  preprocess_typed<half>(tensor);
78  }
79  else
80  {
81  ARM_COMPUTE_ERROR("NOT SUPPORTED!");
82  }
83 }
84 
85 template <typename T>
86 void TFPreproccessor::preprocess_typed(ITensor &tensor)
87 {
88  Window window;
89  window.use_tensor_dimensions(tensor.info()->tensor_shape());
90 
91  const float range = _max_range - _min_range;
92  execute_window_loop(window, [&](const Coordinates & id)
93  {
94  const T value = *reinterpret_cast<T *>(tensor.ptr_to_element(id));
95  float res = value / 255.f; // Normalize to [0, 1]
96  res = res * range + _min_range; // Map to [min_range, max_range]
97  *reinterpret_cast<T *>(tensor.ptr_to_element(id)) = res;
98  });
99 }
100 
/** Construct a Caffe-style preprocessor.
 *
 * @param[in] mean  Per-channel mean values that preprocess() subtracts
 * @param[in] bgr   True if the tensor uses BGR channel ordering
 * @param[in] scale Scale factor applied after mean subtraction
 */
CaffePreproccessor::CaffePreproccessor(std::array<float, 3> mean, bool bgr, float scale)
    : _mean(mean), _bgr(bgr), _scale(scale)
{
    // For BGR ordering swap the first and last mean components so indexing by
    // channel id still picks the matching mean value
    if(_bgr)
    {
        std::swap(_mean[0], _mean[2]);
    }
}
109 
111 {
112  if(tensor.info()->data_type() == DataType::F32)
113  {
114  preprocess_typed<float>(tensor);
115  }
116  else if(tensor.info()->data_type() == DataType::F16)
117  {
118  preprocess_typed<half>(tensor);
119  }
120  else
121  {
122  ARM_COMPUTE_ERROR("NOT SUPPORTED!");
123  }
124 }
125 
126 template <typename T>
127 void CaffePreproccessor::preprocess_typed(ITensor &tensor)
128 {
129  Window window;
130  window.use_tensor_dimensions(tensor.info()->tensor_shape());
132 
133  execute_window_loop(window, [&](const Coordinates & id)
134  {
135  const T value = *reinterpret_cast<T *>(tensor.ptr_to_element(id)) - T(_mean[id[channel_idx]]);
136  *reinterpret_cast<T *>(tensor.ptr_to_element(id)) = value * T(_scale);
137  });
138 }
139 
/** Construct a PPM writer.
 *
 * @param[in] name    Base name for output files; the running index and ".ppm" are appended
 * @param[in] maximum Maximum number of tensors to write (0 appears to mean unlimited — see access_tensor)
 */
PPMWriter::PPMWriter(std::string name, unsigned int maximum)
    : _name(std::move(name)), _iterator(0), _maximum(maximum)
{
}
144 
146 {
147  std::stringstream ss;
148  ss << _name << _iterator << ".ppm";
149 
150  arm_compute::utils::save_to_ppm(tensor, ss.str());
151 
152  _iterator++;
153  if(_maximum == 0)
154  {
155  return true;
156  }
157  return _iterator < _maximum;
158 }
159 
/** Construct a dummy accessor that accepts up to @p maximum accesses (0 = unlimited). */
DummyAccessor::DummyAccessor(unsigned int maximum)
    : _iterator(0), _maximum(maximum)
{
}
164 
166 {
167  ARM_COMPUTE_UNUSED(tensor);
168  bool ret = _maximum == 0 || _iterator < _maximum;
169  if(_iterator == _maximum)
170  {
171  _iterator = 0;
172  }
173  else
174  {
175  _iterator++;
176  }
177  return ret;
178 }
179 
180 NumPyAccessor::NumPyAccessor(std::string npy_path, TensorShape shape, DataType data_type, DataLayout data_layout, std::ostream &output_stream)
181  : _npy_tensor(), _filename(std::move(npy_path)), _output_stream(output_stream)
182 {
183  NumPyBinLoader loader(_filename, data_layout);
184 
185  TensorInfo info(shape, 1, data_type);
186  info.set_data_layout(data_layout);
187 
188  _npy_tensor.allocator()->init(info);
189  _npy_tensor.allocator()->allocate();
190 
191  loader.access_tensor(_npy_tensor);
192 }
193 
194 template <typename T>
195 void NumPyAccessor::access_numpy_tensor(ITensor &tensor, T tolerance)
196 {
197  const int num_elements = tensor.info()->tensor_shape().total_size();
198  int num_mismatches = utils::compare_tensor<T>(tensor, _npy_tensor, tolerance);
199  float percentage_mismatches = static_cast<float>(num_mismatches) / num_elements;
200 
201  _output_stream << "Results: " << 100.f - (percentage_mismatches * 100) << " % matches with the provided output[" << _filename << "]." << std::endl;
202  _output_stream << " " << num_elements - num_mismatches << " out of " << num_elements << " matches with the provided output[" << _filename << "]." << std::endl
203  << std::endl;
204 }
205 
207 {
209  ARM_COMPUTE_ERROR_ON(_npy_tensor.info()->dimension(0) != tensor.info()->dimension(0));
210 
211  switch(tensor.info()->data_type())
212  {
213  case DataType::QASYMM8:
214  access_numpy_tensor<qasymm8_t>(tensor, 0);
215  break;
216  case DataType::F32:
217  access_numpy_tensor<float>(tensor, 0.0001f);
218  break;
219  default:
220  ARM_COMPUTE_ERROR("NOT SUPPORTED!");
221  }
222 
223  return false;
224 }
225 
226 #ifdef ARM_COMPUTE_ASSERTS_ENABLED
/** Construct a print accessor that dumps tensors to @p output_stream using @p io_fmt. */
PrintAccessor::PrintAccessor(std::ostream &output_stream, IOFormatInfo io_fmt)
    : _output_stream(output_stream), _io_fmt(io_fmt)
{
}
231 
233 {
234  tensor.print(_output_stream, _io_fmt);
235  return false;
236 }
237 #endif /* ARM_COMPUTE_ASSERTS_ENABLED */
238 
/** Construct a NumPy-saving accessor.
 *
 * @param[in] npy_name   Output .npy file name
 * @param[in] is_fortran True to save in Fortran (column-major) order
 */
SaveNumPyAccessor::SaveNumPyAccessor(std::string npy_name, const bool is_fortran)
    : _npy_name(std::move(npy_name)), _is_fortran(is_fortran)
{
}
243 
245 {
247 
248  utils::save_to_npy(tensor, _npy_name, _is_fortran);
249 
250  return false;
251 }
252 
/** Construct an image accessor.
 *
 * @param[in] filename     Image file to load
 * @param[in] bgr          True if the tensor expects BGR channel ordering
 * @param[in] preprocessor Optional preprocessor applied after loading
 */
ImageAccessor::ImageAccessor(std::string filename, bool bgr, std::unique_ptr<IPreprocessor> preprocessor)
    : _already_loaded(false), _filename(std::move(filename)), _bgr(bgr), _preprocessor(std::move(preprocessor))
{
}
257 
259 {
260  if(!_already_loaded)
261  {
262  auto image_loader = utils::ImageLoaderFactory::create(_filename);
263  ARM_COMPUTE_EXIT_ON_MSG(image_loader == nullptr, "Unsupported image type");
264 
265  // Open image file
266  image_loader->open(_filename);
267 
268  // Get permutated shape and permutation parameters
269  TensorShape permuted_shape = tensor.info()->tensor_shape();
271  if(tensor.info()->data_layout() != DataLayout::NCHW)
272  {
273  std::tie(permuted_shape, perm) = compute_permutation_parameters(tensor.info()->tensor_shape(), tensor.info()->data_layout());
274  }
275 
276 #ifdef __arm__
277  ARM_COMPUTE_EXIT_ON_MSG_VAR(image_loader->width() != permuted_shape.x() || image_loader->height() != permuted_shape.y(),
278  "Failed to load image file: dimensions [%d,%d] not correct, expected [%" PRIu32 ",%" PRIu32 "].",
279  image_loader->width(), image_loader->height(), permuted_shape.x(), permuted_shape.y());
280 #else // __arm__
281  ARM_COMPUTE_EXIT_ON_MSG_VAR(image_loader->width() != permuted_shape.x() || image_loader->height() != permuted_shape.y(),
282  "Failed to load image file: dimensions [%d,%d] not correct, expected [%" PRIu64 ",%" PRIu64 "].",
283  image_loader->width(), image_loader->height(),
284  static_cast<uint64_t>(permuted_shape.x()), static_cast<uint64_t>(permuted_shape.y()));
285 #endif // __arm__
286 
287  // Fill the tensor with the PPM content (BGR)
288  image_loader->fill_planar_tensor(tensor, _bgr);
289 
290  // Preprocess tensor
291  if(_preprocessor)
292  {
293  _preprocessor->preprocess(tensor);
294  }
295  }
296 
297  _already_loaded = !_already_loaded;
298  return _already_loaded;
299 }
300 
302  std::string images_path,
303  std::unique_ptr<IPreprocessor> preprocessor,
304  bool bgr,
305  unsigned int start,
306  unsigned int end,
307  std::ostream &output_stream)
308  : _path(std::move(images_path)), _images(), _preprocessor(std::move(preprocessor)), _bgr(bgr), _offset(0), _output_stream(output_stream)
309 {
310  ARM_COMPUTE_EXIT_ON_MSG(start > end, "Invalid validation range!");
311 
312  std::ifstream ifs;
313  try
314  {
315  ifs.exceptions(std::ifstream::badbit);
316  ifs.open(image_list, std::ios::in | std::ios::binary);
317 
318  // Parse image names
319  unsigned int counter = 0;
320  for(std::string line; !std::getline(ifs, line).fail() && counter <= end; ++counter)
321  {
322  // Add image to process if withing range
323  if(counter >= start)
324  {
325  std::stringstream linestream(line);
326  std::string image_name;
327 
328  linestream >> image_name;
329  _images.emplace_back(std::move(image_name));
330  }
331  }
332  }
333  catch(const std::ifstream::failure &e)
334  {
335  ARM_COMPUTE_ERROR_VAR("Accessing %s: %s", image_list.c_str(), e.what());
336  }
337 }
338 
340 {
341  bool ret = _offset < _images.size();
342  if(ret)
343  {
344  utils::JPEGLoader jpeg;
345 
346  // Open JPEG file
347  std::string image_name = _path + _images[_offset++];
348  jpeg.open(image_name);
349  _output_stream << "[" << _offset << "/" << _images.size() << "] Validating " << image_name << std::endl;
350 
351  // Get permutated shape and permutation parameters
352  TensorShape permuted_shape = tensor.info()->tensor_shape();
354  if(tensor.info()->data_layout() != DataLayout::NCHW)
355  {
356  std::tie(permuted_shape, perm) = compute_permutation_parameters(tensor.info()->tensor_shape(),
357  tensor.info()->data_layout());
358  }
359 
360 #ifdef __arm__
361  ARM_COMPUTE_EXIT_ON_MSG_VAR(jpeg.width() != permuted_shape.x() || jpeg.height() != permuted_shape.y(),
362  "Failed to load image file: dimensions [%d,%d] not correct, expected [%" PRIu32 ",%" PRIu32 "].",
363  jpeg.width(), jpeg.height(), permuted_shape.x(), permuted_shape.y());
364 #else // __arm__
365  ARM_COMPUTE_EXIT_ON_MSG_VAR(jpeg.width() != permuted_shape.x() || jpeg.height() != permuted_shape.y(),
366  "Failed to load image file: dimensions [%d,%d] not correct, expected [%" PRIu64 ",%" PRIu64 "].",
367  jpeg.width(), jpeg.height(),
368  static_cast<uint64_t>(permuted_shape.x()), static_cast<uint64_t>(permuted_shape.y()));
369 #endif // __arm__
370 
371  // Fill the tensor with the JPEG content (BGR)
372  jpeg.fill_planar_tensor(tensor, _bgr);
373 
374  // Preprocess tensor
375  if(_preprocessor)
376  {
377  _preprocessor->preprocess(tensor);
378  }
379  }
380 
381  return ret;
382 }
383 
385  std::ostream &output_stream,
386  unsigned int start,
387  unsigned int end)
388  : _results(), _output_stream(output_stream), _offset(0), _positive_samples_top1(0), _positive_samples_top5(0)
389 {
390  ARM_COMPUTE_EXIT_ON_MSG(start > end, "Invalid validation range!");
391 
392  std::ifstream ifs;
393  try
394  {
395  ifs.exceptions(std::ifstream::badbit);
396  ifs.open(image_list, std::ios::in | std::ios::binary);
397 
398  // Parse image correctly classified labels
399  unsigned int counter = 0;
400  for(std::string line; !std::getline(ifs, line).fail() && counter <= end; ++counter)
401  {
402  // Add label if within range
403  if(counter >= start)
404  {
405  std::stringstream linestream(line);
406  std::string image_name;
407  int result;
408 
409  linestream >> image_name >> result;
410  _results.emplace_back(result);
411  }
412  }
413  }
414  catch(const std::ifstream::failure &e)
415  {
416  ARM_COMPUTE_ERROR_VAR("Accessing %s: %s", image_list.c_str(), e.what());
417  }
418 }
419 
421 {
422  _offset = 0;
423  _positive_samples_top1 = 0;
424  _positive_samples_top5 = 0;
425 }
426 
428 {
429  bool ret = _offset < _results.size();
430  if(ret)
431  {
432  // Get results
433  std::vector<size_t> tensor_results;
434  switch(tensor.info()->data_type())
435  {
436  case DataType::QASYMM8:
437  tensor_results = access_predictions_tensor<uint8_t>(tensor);
438  break;
439  case DataType::F16:
440  tensor_results = access_predictions_tensor<half>(tensor);
441  break;
442  case DataType::F32:
443  tensor_results = access_predictions_tensor<float>(tensor);
444  break;
445  default:
446  ARM_COMPUTE_ERROR("NOT SUPPORTED!");
447  }
448 
449  // Check if tensor results are within top-n accuracy
450  size_t correct_label = _results[_offset++];
451 
452  aggregate_sample(tensor_results, _positive_samples_top1, 1, correct_label);
453  aggregate_sample(tensor_results, _positive_samples_top5, 5, correct_label);
454  }
455 
456  // Report top_n accuracy
457  if(_offset >= _results.size())
458  {
459  report_top_n(1, _results.size(), _positive_samples_top1);
460  report_top_n(5, _results.size(), _positive_samples_top5);
461  }
462 
463  return ret;
464 }
465 
466 template <typename T>
467 std::vector<size_t> ValidationOutputAccessor::access_predictions_tensor(arm_compute::ITensor &tensor)
468 {
469  // Get the predicted class
470  std::vector<size_t> index;
471 
472  const auto output_net = reinterpret_cast<T *>(tensor.buffer() + tensor.info()->offset_first_element_in_bytes());
473  const size_t num_classes = tensor.info()->dimension(0);
474 
475  index.resize(num_classes);
476 
477  // Sort results
478  std::iota(std::begin(index), std::end(index), static_cast<size_t>(0));
479  std::sort(std::begin(index), std::end(index),
480  [&](size_t a, size_t b)
481  {
482  return output_net[a] > output_net[b];
483  });
484 
485  return index;
486 }
487 
488 void ValidationOutputAccessor::aggregate_sample(const std::vector<size_t> &res, size_t &positive_samples, size_t top_n, size_t correct_label)
489 {
490  auto is_valid_label = [correct_label](size_t label)
491  {
492  return label == correct_label;
493  };
494 
495  if(std::any_of(std::begin(res), std::begin(res) + top_n, is_valid_label))
496  {
497  ++positive_samples;
498  }
499 }
500 
501 void ValidationOutputAccessor::report_top_n(size_t top_n, size_t total_samples, size_t positive_samples)
502 {
503  size_t negative_samples = total_samples - positive_samples;
504  float accuracy = positive_samples / static_cast<float>(total_samples);
505 
506  _output_stream << "----------Top " << top_n << " accuracy ----------" << std::endl
507  << std::endl;
508  _output_stream << "Positive samples : " << positive_samples << std::endl;
509  _output_stream << "Negative samples : " << negative_samples << std::endl;
510  _output_stream << "Accuracy : " << accuracy << std::endl;
511 }
512 
513 DetectionOutputAccessor::DetectionOutputAccessor(const std::string &labels_path, std::vector<TensorShape> &imgs_tensor_shapes, std::ostream &output_stream)
514  : _labels(), _tensor_shapes(std::move(imgs_tensor_shapes)), _output_stream(output_stream)
515 {
516  _labels.clear();
517 
518  std::ifstream ifs;
519 
520  try
521  {
522  ifs.exceptions(std::ifstream::badbit);
523  ifs.open(labels_path, std::ios::in | std::ios::binary);
524 
525  for(std::string line; !std::getline(ifs, line).fail();)
526  {
527  _labels.emplace_back(line);
528  }
529  }
530  catch(const std::ifstream::failure &e)
531  {
532  ARM_COMPUTE_ERROR_VAR("Accessing %s: %s", labels_path.c_str(), e.what());
533  }
534 }
535 
/** Print every detection row of @p tensor to the output stream.
 *
 * Each detection occupies 7 consecutive values: image index, label id, confidence,
 * then normalised [xmin, ymin, xmax, ymax], which are scaled back to the matching
 * source-image dimensions from _tensor_shapes before printing.
 */
template <typename T>
void DetectionOutputAccessor::access_predictions_tensor(ITensor &tensor)
{
    // Number of detections = extent of the valid region along y
    const size_t num_detection = tensor.info()->valid_region().shape.y();
    const auto   output_prt    = reinterpret_cast<T *>(tensor.buffer() + tensor.info()->offset_first_element_in_bytes());

    if(num_detection > 0)
    {
        _output_stream << "---------------------- Detections ----------------------" << std::endl
                       << std::endl;

        _output_stream << std::left << std::setprecision(4) << std::setw(8) << "Image | " << std::setw(8) << "Label | " << std::setw(12) << "Confidence | "
                       << "[ xmin, ymin, xmax, ymax ]" << std::endl;

        for(size_t i = 0; i < num_detection; ++i)
        {
            // Column 0 of each row is the index of the image this detection belongs to
            auto im = static_cast<const int>(output_prt[i * 7]);
            _output_stream << std::setw(8) << im << std::setw(8)
                           << _labels[output_prt[i * 7 + 1]] << std::setw(12) << output_prt[i * 7 + 2]
                           << " [" << (output_prt[i * 7 + 3] * _tensor_shapes[im].x())
                           << ", " << (output_prt[i * 7 + 4] * _tensor_shapes[im].y())
                           << ", " << (output_prt[i * 7 + 5] * _tensor_shapes[im].x())
                           << ", " << (output_prt[i * 7 + 6] * _tensor_shapes[im].y())
                           << "]" << std::endl;
        }
    }
    else
    {
        _output_stream << "No detection found." << std::endl;
    }
}
567 
569 {
571 
572  switch(tensor.info()->data_type())
573  {
574  case DataType::F32:
575  access_predictions_tensor<float>(tensor);
576  break;
577  default:
578  ARM_COMPUTE_ERROR("NOT SUPPORTED!");
579  }
580 
581  return false;
582 }
583 
584 TopNPredictionsAccessor::TopNPredictionsAccessor(const std::string &labels_path, size_t top_n, std::ostream &output_stream)
585  : _labels(), _output_stream(output_stream), _top_n(top_n)
586 {
587  _labels.clear();
588 
589  std::ifstream ifs;
590 
591  try
592  {
593  ifs.exceptions(std::ifstream::badbit);
594  ifs.open(labels_path, std::ios::in | std::ios::binary);
595 
596  for(std::string line; !std::getline(ifs, line).fail();)
597  {
598  _labels.emplace_back(line);
599  }
600  }
601  catch(const std::ifstream::failure &e)
602  {
603  ARM_COMPUTE_ERROR_VAR("Accessing %s: %s", labels_path.c_str(), e.what());
604  }
605 }
606 
607 template <typename T>
608 void TopNPredictionsAccessor::access_predictions_tensor(ITensor &tensor)
609 {
610  // Get the predicted class
611  std::vector<T> classes_prob;
612  std::vector<size_t> index;
613 
614  const auto output_net = reinterpret_cast<T *>(tensor.buffer() + tensor.info()->offset_first_element_in_bytes());
615  const size_t num_classes = tensor.info()->dimension(0);
616 
617  classes_prob.resize(num_classes);
618  index.resize(num_classes);
619 
620  std::copy(output_net, output_net + num_classes, classes_prob.begin());
621 
622  // Sort results
623  std::iota(std::begin(index), std::end(index), static_cast<size_t>(0));
624  std::sort(std::begin(index), std::end(index),
625  [&](size_t a, size_t b)
626  {
627  return classes_prob[a] > classes_prob[b];
628  });
629 
630  _output_stream << "---------- Top " << _top_n << " predictions ----------" << std::endl
631  << std::endl;
632  for(size_t i = 0; i < _top_n; ++i)
633  {
634  _output_stream << std::fixed << std::setprecision(4)
635  << +classes_prob[index.at(i)]
636  << " - [id = " << index.at(i) << "]"
637  << ", " << _labels[index.at(i)] << std::endl;
638  }
639 }
640 
642 {
644  ARM_COMPUTE_ERROR_ON(_labels.size() != tensor.info()->dimension(0));
645 
646  switch(tensor.info()->data_type())
647  {
648  case DataType::QASYMM8:
649  access_predictions_tensor<uint8_t>(tensor);
650  break;
651  case DataType::F32:
652  access_predictions_tensor<float>(tensor);
653  break;
654  default:
655  ARM_COMPUTE_ERROR("NOT SUPPORTED!");
656  }
657 
658  return false;
659 }
660 
/** Construct a random-fill accessor.
 *
 * @param[in] lower Lower bound of the generated values
 * @param[in] upper Upper bound of the generated values
 * @param[in] seed  Seed for the Mersenne-Twister generator (fills are deterministic per seed)
 */
RandomAccessor::RandomAccessor(PixelValue lower, PixelValue upper, std::random_device::result_type seed)
    : _lower(lower), _upper(upper), _seed(seed)
{
}
665 
666 template <typename T, typename D>
667 void RandomAccessor::fill(ITensor &tensor, D &&distribution)
668 {
669  std::mt19937 gen(_seed);
670 
671  if(tensor.info()->padding().empty() && (dynamic_cast<SubTensor *>(&tensor) == nullptr))
672  {
673  for(size_t offset = 0; offset < tensor.info()->total_size(); offset += tensor.info()->element_size())
674  {
675  const auto value = static_cast<T>(distribution(gen));
676  *reinterpret_cast<T *>(tensor.buffer() + offset) = value;
677  }
678  }
679  else
680  {
681  // If tensor has padding accessing tensor elements through execution window.
682  Window window;
683  window.use_tensor_dimensions(tensor.info()->tensor_shape());
684 
685  execute_window_loop(window, [&](const Coordinates & id)
686  {
687  const auto value = static_cast<T>(distribution(gen));
688  *reinterpret_cast<T *>(tensor.ptr_to_element(id)) = value;
689  });
690  }
691 }
692 
694 {
695  switch(tensor.info()->data_type())
696  {
697  case DataType::QASYMM8:
698  case DataType::U8:
699  {
700  std::uniform_int_distribution<uint8_t> distribution_u8(_lower.get<uint8_t>(), _upper.get<uint8_t>());
701  fill<uint8_t>(tensor, distribution_u8);
702  break;
703  }
704  case DataType::S8:
705  {
706  std::uniform_int_distribution<int8_t> distribution_s8(_lower.get<int8_t>(), _upper.get<int8_t>());
707  fill<int8_t>(tensor, distribution_s8);
708  break;
709  }
710  case DataType::U16:
711  {
712  std::uniform_int_distribution<uint16_t> distribution_u16(_lower.get<uint16_t>(), _upper.get<uint16_t>());
713  fill<uint16_t>(tensor, distribution_u16);
714  break;
715  }
716  case DataType::S16:
717  {
718  std::uniform_int_distribution<int16_t> distribution_s16(_lower.get<int16_t>(), _upper.get<int16_t>());
719  fill<int16_t>(tensor, distribution_s16);
720  break;
721  }
722  case DataType::U32:
723  {
724  std::uniform_int_distribution<uint32_t> distribution_u32(_lower.get<uint32_t>(), _upper.get<uint32_t>());
725  fill<uint32_t>(tensor, distribution_u32);
726  break;
727  }
728  case DataType::S32:
729  {
730  std::uniform_int_distribution<int32_t> distribution_s32(_lower.get<int32_t>(), _upper.get<int32_t>());
731  fill<int32_t>(tensor, distribution_s32);
732  break;
733  }
734  case DataType::U64:
735  {
736  std::uniform_int_distribution<uint64_t> distribution_u64(_lower.get<uint64_t>(), _upper.get<uint64_t>());
737  fill<uint64_t>(tensor, distribution_u64);
738  break;
739  }
740  case DataType::S64:
741  {
742  std::uniform_int_distribution<int64_t> distribution_s64(_lower.get<int64_t>(), _upper.get<int64_t>());
743  fill<int64_t>(tensor, distribution_s64);
744  break;
745  }
746  case DataType::F16:
747  {
748  arm_compute::utils::uniform_real_distribution_16bit<half> distribution_f16(_lower.get<float>(), _upper.get<float>());
749  fill<half>(tensor, distribution_f16);
750  break;
751  }
752  case DataType::F32:
753  {
754  std::uniform_real_distribution<float> distribution_f32(_lower.get<float>(), _upper.get<float>());
755  fill<float>(tensor, distribution_f32);
756  break;
757  }
758  case DataType::F64:
759  {
760  std::uniform_real_distribution<double> distribution_f64(_lower.get<double>(), _upper.get<double>());
761  fill<double>(tensor, distribution_f64);
762  break;
763  }
764  default:
765  ARM_COMPUTE_ERROR("NOT SUPPORTED!");
766  }
767  return true;
768 }
769 
/** Construct a NumPy binary loader.
 *
 * @param[in] filename    .npy file to load
 * @param[in] file_layout Data layout the file contents are stored in
 */
NumPyBinLoader::NumPyBinLoader(std::string filename, DataLayout file_layout)
    : _already_loaded(false), _filename(std::move(filename)), _file_layout(file_layout)
{
}
774 
776 {
777  if(!_already_loaded)
778  {
779  utils::NPYLoader loader;
780  loader.open(_filename, _file_layout);
781  loader.fill_tensor(tensor);
782  }
783 
784  _already_loaded = !_already_loaded;
785  return _already_loaded;
786 }
SaveNumPyAccessor(const std::string npy_name, const bool is_fortran=false)
Constructor.
Definition: GraphUtils.cpp:239
ImageAccessor(std::string filename, bool bgr=true, std::unique_ptr< IPreprocessor > preprocessor=nullptr)
Constructor.
Definition: GraphUtils.cpp:253
__global uchar * offset(const Image *img, int x, int y)
Get the pointer position of a Image.
Definition: helpers.h:846
void save_to_ppm(T &tensor, const std::string &ppm_filename)
Template helper function to save a tensor image to a PPM file.
Definition: Utils.h:547
Class describing the value of a pixel for any image format.
Definition: PixelValue.h:34
Class to load the content of a JPEG file into an Image.
Definition: ImageLoader.h:432
bool access_tensor(ITensor &tensor) override
Interface to be implemented to access a given tensor.
Definition: GraphUtils.cpp:641
uint8_t * ptr_to_element(const Coordinates &id) const
Return a pointer to the element at the passed coordinates.
Definition: ITensor.h:63
Shape of a tensor.
Definition: TensorShape.h:39
void init(const TensorAllocator &allocator, const Coordinates &coords, TensorInfo &sub_info)
Shares the same backing memory with another tensor allocator, while the tensor info might be differen...
TensorShape shape
Shape of the valid region.
Definition: Types.h:261
virtual size_t dimension(size_t index) const =0
Return the size of the requested dimension.
SimpleTensor< float > b
Definition: DFT.cpp:157
bool access_tensor(ITensor &tensor) override
Interface to be implemented to access a given tensor.
Definition: GraphUtils.cpp:206
#define ARM_COMPUTE_ERROR(msg)
Print the given message then throw an std::runtime_error.
Definition: Error.h:352
1 channel, 1 U8 per channel
void get(uint8_t &v) const
Interpret the pixel value as a U8.
Definition: PixelValue.h:241
#define ARM_COMPUTE_EXIT_ON_MSG_VAR(cond, msg,...)
If the condition is true, the given message is printed and program exits.
Definition: Error.h:395
virtual DataType data_type() const =0
Data type used for each element of the tensor.
void fill_tensor(T &tensor)
Fill a tensor with the content of the currently open NPY file.
Definition: Utils.h:406
1 channel, 1 F32 per channel
Strides PermutationVector
Permutation vector.
Definition: Types.h:49
bool access_tensor(ITensor &tensor) override
Interface to be implemented to access a given tensor.
Definition: GraphUtils.cpp:775
#define ARM_COMPUTE_ERROR_VAR(msg,...)
Print the given message then throw an std::runtime_error.
Definition: Error.h:346
std::stringstream ss(mlgo_str)
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
Definition: Error.h:466
const DataLayout data_layout
Definition: Im2Col.cpp:151
ValidationOutputAccessor(const std::string &image_list, std::ostream &output_stream=std::cout, unsigned int start=0, unsigned int end=0)
Default Constructor.
Definition: GraphUtils.cpp:384
static std::unique_ptr< IImageLoader > create(const std::string &filename)
Create an image loader depending on the image type.
Definition: ImageLoader.h:509
1 channel, 1 U16 per channel
void preprocess(ITensor &tensor) override
Preprocess the given tensor.
Definition: GraphUtils.cpp:110
RandomAccessor(PixelValue lower, PixelValue upper, const std::random_device::result_type seed=0)
Constructor.
Definition: GraphUtils.cpp:661
SimpleTensor< T > copy(const SimpleTensor< T > &src, const TensorShape &output_shape)
Definition: Copy.cpp:37
void fill_planar_tensor(T &tensor, bool bgr=false)
Fill a tensor with 3 planes (one for each channel) with the content of the currently open image file...
Definition: ImageLoader.h:257
Interface for Neon tensor.
Definition: ITensor.h:36
void use_tensor_dimensions(const TensorShape &shape, size_t first_dimension=Window::DimX)
Use the tensor's dimensions to fill the window dimensions.
Definition: Window.inl:276
bool access_tensor(ITensor &tensor) override
Interface to be implemented to access a given tensor.
Definition: GraphUtils.cpp:244
1 channel, 1 F16 per channel
virtual ValidRegion valid_region() const =0
Valid region of the tensor.
TensorAllocator * allocator()
Return a pointer to the tensor's allocator.
Definition: Tensor.cpp:48
ITensorInfo * info() const override
Interface to be implemented by the child class to return the tensor's metadata.
Definition: Tensor.cpp:33
void permute(Dimensions< T > &dimensions, const PermutationVector &perm)
Permutes given Dimensions according to a permutation vector.
Definition: Helpers.h:125
bool access_tensor(ITensor &tensor) override
Interface to be implemented to access a given tensor.
Definition: GraphUtils.cpp:339
1 channel, 1 S32 per channel
T x() const
Alias to access the size of the first dimension.
Definition: Dimensions.h:87
signed 64-bit number
Basic implementation of the sub-tensor interface.
Definition: SubTensor.h:37
Specialized class to generate random non-zero FP16 values.
Definition: Utils.h:298
const DataType data_type
Definition: Im2Col.cpp:150
ITensorInfo & set_data_layout(const DataLayout &data_layout) override
Set the data layout of the tensor.
Definition: TensorInfo.cpp:386
Numpy Binary loader class.
Definition: GraphUtils.h:431
#define ARM_COMPUTE_UNUSED(...)
To avoid unused variables warnings.
Definition: Error.h:152
#define ARM_COMPUTE_EXIT_ON_MSG(cond, msg)
If the condition is true, the given message is printed and program exits.
Definition: Error.h:379
DetectionOutputAccessor(const std::string &labels_path, std::vector< TensorShape > &imgs_tensor_shapes, std::ostream &output_stream=std::cout)
Constructor.
Definition: GraphUtils.cpp:513
void save_to_npy(T &tensor, const std::string &npy_filename, bool fortran_order)
Template helper function to save a tensor image to a NPY file.
Definition: Utils.h:627
bool access_tensor(ITensor &tensor) override
Interface to be implemented to access a given tensor.
Definition: GraphUtils.cpp:693
1 channel, 1 U32 per channel
virtual const TensorShape & tensor_shape() const =0
Size for each dimension of the tensor.
SimpleTensor< T > range(SimpleTensor< T > &dst, float start, const size_t num_of_elements, float step)
Definition: Range.cpp:50
Numpy data loader.
Definition: Utils.h:328
IO formatting information class.
Definition: Types.h:2205
quantized, asymmetric fixed-point 8-bit number unsigned
unsigned int width() const
Return the width of the currently open image file.
Definition: ImageLoader.h:137
Coordinates of an item.
Definition: Coordinates.h:37
virtual uint8_t * buffer() const =0
Interface to be implemented by the child class to return a pointer to CPU memory. ...
void allocate() override
Allocate size specified by TensorInfo of CPU memory.
size_t total_size() const
Collapses all dimensions to a single linear total size.
Definition: TensorShape.h:172
virtual ITensorInfo * info() const =0
Interface to be implemented by the child class to return the tensor's metadata.
virtual size_t element_size() const =0
Element size in bytes calculated as data_size() * num_channels()
void end(TokenStream &in, bool &valid)
Definition: MLGOParser.cpp:290
virtual PaddingSize padding() const =0
Padding of tensor.
std::uniform_real_distribution< float > distribution(-5.f, 5.f)
NumPyAccessor(std::string npy_path, TensorShape shape, DataType data_type, DataLayout data_layout=DataLayout::NCHW, std::ostream &output_stream=std::cout)
Constructor.
Definition: GraphUtils.cpp:180
1 channel, 1 S16 per channel
TopNPredictionsAccessor(const std::string &labels_path, size_t top_n=5, std::ostream &output_stream=std::cout)
Constructor.
Definition: GraphUtils.cpp:584
Num samples, channels, height, width.
#define ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(t, c,...)
Definition: Validate.h:790
const char * name
Strides of an item in bytes.
Definition: Strides.h:37
bool access_tensor(ITensor &tensor) override
Interface to be implemented to access a given tensor.
virtual size_t offset_first_element_in_bytes() const =0
The offset from the beginning of the memory allocation to the first element of the tensor...
bool access_tensor(ITensor &tensor) override
Interface to be implemented to access a given tensor.
Definition: GraphUtils.cpp:258
ScaleKernelInfo info(interpolation_policy, default_border_mode, PixelValue(), sampling_policy, false)
bool access_tensor(ITensor &tensor) override
Interface to be implemented to access a given tensor.
Definition: GraphUtils.cpp:427
void preprocess(ITensor &tensor) override
Preprocess the given tensor.
Definition: GraphUtils.cpp:69
virtual size_t total_size() const =0
Returns the total size of the tensor in bytes.
constexpr bool empty() const
Check if the entire border is zero.
Definition: Types.h:300
unsigned int num_dimensions() const
Returns the effective dimensionality of the tensor.
Definition: Dimensions.h:143
bool access_tensor(ITensor &tensor) override
Interface to be implemented to access a given tensor.
Definition: GraphUtils.cpp:145
Num samples, height, width, channels.
TFPreproccessor(float min_range=-1.f, float max_range=1.f)
Constructor.
Definition: GraphUtils.cpp:65
void open(const std::string &npy_filename, DataLayout file_layout=DataLayout::NCHW)
Open a NPY file and reads its metadata.
Definition: Utils.h:342
bool access_tensor(ITensor &tensor) override
Interface to be implemented to access a given tensor.
Definition: GraphUtils.cpp:568
Store the tensor's metadata.
Definition: TensorInfo.h:45
DummyAccessor(unsigned int maximum=1)
Constructor.
Definition: GraphUtils.cpp:160
void execute_window_loop(const Window &w, L &&lambda_function, Ts &&... iterators)
Iterate through the passed window, automatically adjusting the iterators and calling the lambda_funct...
Definition: Helpers.inl:77
void print(std::ostream &s, IOFormatInfo io_fmt=IOFormatInfo()) const
Print a tensor to a given stream using user defined formatting information.
T y() const
Alias to access the size of the second dimension.
Definition: Dimensions.h:92
PPMWriter(std::string name, unsigned int maximum=1)
Constructor.
Definition: GraphUtils.cpp:140
bool access_tensor(ITensor &tensor) override
Interface to be implemented to access a given tensor.
Definition: GraphUtils.cpp:165
64-bit floating-point number
size_t get_data_layout_dimension_index(const DataLayout data_layout, const DataLayoutDimension data_layout_dimension)
Get the index of the given dimension.
Definition: Helpers.inl:193
PrintAccessor(std::ostream &output_stream=std::cout, IOFormatInfo io_fmt=IOFormatInfo())
Constructor.
unsigned 64-bit number
DataType
Available data types.
Definition: Types.h:77
void swap(Window &lhs, Window &rhs)
Definition: Window.inl:304
NumPyBinLoader(std::string filename, DataLayout file_layout=DataLayout::NCHW)
Default Constructor.
Definition: GraphUtils.cpp:770
unsigned int height() const
Return the height of the currently open image file.
Definition: ImageLoader.h:142
DataLayout
[DataLayout enum definition]
Definition: Types.h:120
signed 8-bit number
Describe a multidimensional execution window.
Definition: Window.h:39
ValidationInputAccessor(const std::string &image_list, std::string images_path, std::unique_ptr< IPreprocessor > preprocessor=nullptr, bool bgr=true, unsigned int start=0, unsigned int end=0, std::ostream &output_stream=std::cout)
Constructor.
Definition: GraphUtils.cpp:301
CaffePreproccessor(std::array< float, 3 > mean=std::array< float, 3 > { { 0, 0, 0 } }, bool bgr=true, float scale=1.f)
Default Constructor.
Definition: GraphUtils.cpp:101
void open(const std::string &filename) override
Open an image file and reads its metadata (Width, height)
Definition: ImageLoader.h:456
virtual DataLayout data_layout() const =0
Get the data layout of the tensor.