Compute Library 20.08
GraphUtils.cpp
1 /*
2  * Copyright (c) 2017-2019 Arm Limited.
3  *
4  * SPDX-License-Identifier: MIT
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a copy
7  * of this software and associated documentation files (the "Software"), to
8  * deal in the Software without restriction, including without limitation the
9  * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10  * sell copies of the Software, and to permit persons to whom the Software is
11  * furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in all
14  * copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22  * SOFTWARE.
23  */
24 
25 #include "utils/GraphUtils.h"
26 
27 #include "arm_compute/core/Helpers.h"
28 #include "arm_compute/core/Types.h"
29 #include "arm_compute/core/PixelValue.h"
30 #include "arm_compute/runtime/SubTensor.h"
31 
32 #pragma GCC diagnostic push
33 #pragma GCC diagnostic ignored "-Wunused-parameter"
34 #include "utils/ImageLoader.h"
35 #pragma GCC diagnostic pop
36 #include "utils/Utils.h"
37 
38 #include <inttypes.h>
39 #include <iomanip>
40 #include <limits>
41 
42 using namespace arm_compute::graph_utils;
43 
44 namespace
45 {
46 std::pair<arm_compute::TensorShape, arm_compute::PermutationVector> compute_permutation_parameters(const arm_compute::TensorShape &shape,
47                                                                                                     arm_compute::DataLayout data_layout)
48 {
49  // Set permutation parameters if needed
50  arm_compute::TensorShape permuted_shape = shape;
51  arm_compute::PermutationVector perm;
52  // Permute only if num_dimensions greater than 2
53  if(shape.num_dimensions() > 2)
54  {
55  perm = (data_layout == arm_compute::DataLayout::NHWC) ? arm_compute::PermutationVector(2U, 0U, 1U) : arm_compute::PermutationVector(1U, 2U, 0U);
56 
57  arm_compute::PermutationVector perm_shape = (data_layout == arm_compute::DataLayout::NCHW) ? arm_compute::PermutationVector(2U, 0U, 1U) : arm_compute::PermutationVector(1U, 2U, 0U);
58  arm_compute::permute(permuted_shape, perm_shape);
59  }
60 
61  return std::make_pair(permuted_shape, perm);
62 }
63 } // namespace
64 
65 TFPreproccessor::TFPreproccessor(float min_range, float max_range)
66  : _min_range(min_range), _max_range(max_range)
67 {
68 }
69 void TFPreproccessor::preprocess(ITensor &tensor)
70 {
71  if(tensor.info()->data_type() == DataType::F32)
72  {
73  preprocess_typed<float>(tensor);
74  }
75  else if(tensor.info()->data_type() == DataType::F16)
76  {
77  preprocess_typed<half>(tensor);
78  }
79  else
80  {
81  ARM_COMPUTE_ERROR("NOT SUPPORTED!");
82  }
83 }
84 
85 template <typename T>
86 void TFPreproccessor::preprocess_typed(ITensor &tensor)
87 {
88  Window window;
89  window.use_tensor_dimensions(tensor.info()->tensor_shape());
90 
91  const float range = _max_range - _min_range;
92  execute_window_loop(window, [&](const Coordinates & id)
93  {
94  const T value = *reinterpret_cast<T *>(tensor.ptr_to_element(id));
95  float res = value / 255.f; // Normalize to [0, 1]
96  res = res * range + _min_range; // Map to [min_range, max_range]
97  *reinterpret_cast<T *>(tensor.ptr_to_element(id)) = res;
98  });
99 }
100 
101 CaffePreproccessor::CaffePreproccessor(std::array<float, 3> mean, bool bgr, float scale)
102  : _mean(mean), _bgr(bgr), _scale(scale)
103 {
104  if(_bgr)
105  {
106  std::swap(_mean[0], _mean[2]);
107  }
108 }
109 
110 void CaffePreproccessor::preprocess(ITensor &tensor)
111 {
112  if(tensor.info()->data_type() == DataType::F32)
113  {
114  preprocess_typed<float>(tensor);
115  }
116  else if(tensor.info()->data_type() == DataType::F16)
117  {
118  preprocess_typed<half>(tensor);
119  }
120  else
121  {
122  ARM_COMPUTE_ERROR("NOT SUPPORTED!");
123  }
124 }
125 
126 template <typename T>
127 void CaffePreproccessor::preprocess_typed(ITensor &tensor)
128 {
129  Window window;
130  window.use_tensor_dimensions(tensor.info()->tensor_shape());
131  const int channel_idx = get_data_layout_dimension_index(tensor.info()->data_layout(), DataLayoutDimension::CHANNEL);
132 
133  execute_window_loop(window, [&](const Coordinates & id)
134  {
135  const T value = *reinterpret_cast<T *>(tensor.ptr_to_element(id)) - T(_mean[id[channel_idx]]);
136  *reinterpret_cast<T *>(tensor.ptr_to_element(id)) = value * T(_scale);
137  });
138 }
139 
140 PPMWriter::PPMWriter(std::string name, unsigned int maximum)
141  : _name(std::move(name)), _iterator(0), _maximum(maximum)
142 {
143 }
144 
145 bool PPMWriter::access_tensor(ITensor &tensor)
146 {
147  std::stringstream ss;
148  ss << _name << _iterator << ".ppm";
149 
150  arm_compute::utils::save_to_ppm(tensor, ss.str());
151 
152  _iterator++;
153  if(_maximum == 0)
154  {
155  return true;
156  }
157  return _iterator < _maximum;
158 }
159 
160 DummyAccessor::DummyAccessor(unsigned int maximum)
161  : _iterator(0), _maximum(maximum)
162 {
163 }
164 
165 bool DummyAccessor::access_tensor(ITensor &tensor)
166 {
167  ARM_COMPUTE_UNUSED(tensor);
168  bool ret = _maximum == 0 || _iterator < _maximum;
169  if(_iterator == _maximum)
170  {
171  _iterator = 0;
172  }
173  else
174  {
175  _iterator++;
176  }
177  return ret;
178 }
179 
180 NumPyAccessor::NumPyAccessor(std::string npy_path, TensorShape shape, DataType data_type, DataLayout data_layout, std::ostream &output_stream)
181  : _npy_tensor(), _filename(std::move(npy_path)), _output_stream(output_stream)
182 {
183  NumPyBinLoader loader(_filename, data_layout);
184 
185  TensorInfo info(shape, 1, data_type);
186  info.set_data_layout(data_layout);
187 
188  _npy_tensor.allocator()->init(info);
189  _npy_tensor.allocator()->allocate();
190 
191  loader.access_tensor(_npy_tensor);
192 }
193 
194 template <typename T>
195 void NumPyAccessor::access_numpy_tensor(ITensor &tensor, T tolerance)
196 {
197  const int num_elements = tensor.info()->tensor_shape().total_size();
198  int num_mismatches = utils::compare_tensor<T>(tensor, _npy_tensor, tolerance);
199  float percentage_mismatches = static_cast<float>(num_mismatches) / num_elements;
200 
201  _output_stream << "Results: " << 100.f - (percentage_mismatches * 100) << " % matches with the provided output[" << _filename << "]." << std::endl;
202  _output_stream << " " << num_elements - num_mismatches << " out of " << num_elements << " matches with the provided output[" << _filename << "]." << std::endl
203  << std::endl;
204 }
205 
206 bool NumPyAccessor::access_tensor(ITensor &tensor)
207 {
208  ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&tensor, 1, DataType::QASYMM8, DataType::F32);
209  ARM_COMPUTE_ERROR_ON(_npy_tensor.info()->dimension(0) != tensor.info()->dimension(0));
210 
211  switch(tensor.info()->data_type())
212  {
213  case DataType::QASYMM8:
214  access_numpy_tensor<qasymm8_t>(tensor, 0);
215  break;
216  case DataType::F32:
217  access_numpy_tensor<float>(tensor, 0.0001f);
218  break;
219  default:
220  ARM_COMPUTE_ERROR("NOT SUPPORTED!");
221  }
222 
223  return false;
224 }
225 
226 #ifdef ARM_COMPUTE_ASSERTS_ENABLED
227 PrintAccessor::PrintAccessor(std::ostream &output_stream, IOFormatInfo io_fmt)
228  : _output_stream(output_stream), _io_fmt(io_fmt)
229 {
230 }
231 
232 bool PrintAccessor::access_tensor(ITensor &tensor)
233 {
234  tensor.print(_output_stream, _io_fmt);
235  return false;
236 }
237 #endif /* ARM_COMPUTE_ASSERTS_ENABLED */
238 
239 SaveNumPyAccessor::SaveNumPyAccessor(std::string npy_name, const bool is_fortran)
240  : _npy_name(std::move(npy_name)), _is_fortran(is_fortran)
241 {
242 }
243 
244 bool SaveNumPyAccessor::access_tensor(ITensor &tensor)
245 {
246  ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&tensor, 1, DataType::F32);
247 
248  utils::save_to_npy(tensor, _npy_name, _is_fortran);
249 
250  return false;
251 }
252 
253 ImageAccessor::ImageAccessor(std::string filename, bool bgr, std::unique_ptr<IPreprocessor> preprocessor)
254  : _already_loaded(false), _filename(std::move(filename)), _bgr(bgr), _preprocessor(std::move(preprocessor))
255 {
256 }
257 
258 bool ImageAccessor::access_tensor(ITensor &tensor)
259 {
260  if(!_already_loaded)
261  {
262  auto image_loader = utils::ImageLoaderFactory::create(_filename);
263  ARM_COMPUTE_EXIT_ON_MSG(image_loader == nullptr, "Unsupported image type");
264 
265  // Open image file
266  image_loader->open(_filename);
267 
268  // Get permuted shape and permutation parameters
269  TensorShape permuted_shape = tensor.info()->tensor_shape();
270  PermutationVector perm;
271  if(tensor.info()->data_layout() != DataLayout::NCHW)
272  {
273  std::tie(permuted_shape, perm) = compute_permutation_parameters(tensor.info()->tensor_shape(), tensor.info()->data_layout());
274  }
275  ARM_COMPUTE_EXIT_ON_MSG_VAR(image_loader->width() != permuted_shape.x() || image_loader->height() != permuted_shape.y(),
276  "Failed to load image file: dimensions [%d,%d] not correct, expected [%" PRIu32 ",%" PRIu32 "].",
277  image_loader->width(), image_loader->height(), permuted_shape.x(), permuted_shape.y());
278 
279  // Fill the tensor with the PPM content (BGR)
280  image_loader->fill_planar_tensor(tensor, _bgr);
281 
282  // Preprocess tensor
283  if(_preprocessor)
284  {
285  _preprocessor->preprocess(tensor);
286  }
287  }
288 
289  _already_loaded = !_already_loaded;
290  return _already_loaded;
291 }
292 
293 ValidationInputAccessor::ValidationInputAccessor(const std::string &image_list,
294  std::string images_path,
295  std::unique_ptr<IPreprocessor> preprocessor,
296  bool bgr,
297  unsigned int start,
298  unsigned int end,
299  std::ostream &output_stream)
300  : _path(std::move(images_path)), _images(), _preprocessor(std::move(preprocessor)), _bgr(bgr), _offset(0), _output_stream(output_stream)
301 {
302  ARM_COMPUTE_EXIT_ON_MSG(start > end, "Invalid validation range!");
303 
304  std::ifstream ifs;
305  try
306  {
307  ifs.exceptions(std::ifstream::badbit);
308  ifs.open(image_list, std::ios::in | std::ios::binary);
309 
310  // Parse image names
311  unsigned int counter = 0;
312  for(std::string line; !std::getline(ifs, line).fail() && counter <= end; ++counter)
313  {
314  // Add image to process if within range
315  if(counter >= start)
316  {
317  std::stringstream linestream(line);
318  std::string image_name;
319 
320  linestream >> image_name;
321  _images.emplace_back(std::move(image_name));
322  }
323  }
324  }
325  catch(const std::ifstream::failure &e)
326  {
327  ARM_COMPUTE_ERROR_VAR("Accessing %s: %s", image_list.c_str(), e.what());
328  }
329 }
330 
331 bool ValidationInputAccessor::access_tensor(arm_compute::ITensor &tensor)
332 {
333  bool ret = _offset < _images.size();
334  if(ret)
335  {
336  utils::JPEGLoader jpeg;
337 
338  // Open JPEG file
339  std::string image_name = _path + _images[_offset++];
340  jpeg.open(image_name);
341  _output_stream << "[" << _offset << "/" << _images.size() << "] Validating " << image_name << std::endl;
342 
343  // Get permuted shape and permutation parameters
344  TensorShape permuted_shape = tensor.info()->tensor_shape();
345  PermutationVector perm;
346  if(tensor.info()->data_layout() != DataLayout::NCHW)
347  {
348  std::tie(permuted_shape, perm) = compute_permutation_parameters(tensor.info()->tensor_shape(),
349  tensor.info()->data_layout());
350  }
351  ARM_COMPUTE_EXIT_ON_MSG_VAR(jpeg.width() != permuted_shape.x() || jpeg.height() != permuted_shape.y(),
352  "Failed to load image file: dimensions [%d,%d] not correct, expected [%" PRIu32 ",%" PRIu32 "].",
353  jpeg.width(), jpeg.height(), permuted_shape.x(), permuted_shape.y());
354 
355  // Fill the tensor with the JPEG content (BGR)
356  jpeg.fill_planar_tensor(tensor, _bgr);
357 
358  // Preprocess tensor
359  if(_preprocessor)
360  {
361  _preprocessor->preprocess(tensor);
362  }
363  }
364 
365  return ret;
366 }
367 
368 ValidationOutputAccessor::ValidationOutputAccessor(const std::string &image_list,
369  std::ostream &output_stream,
370  unsigned int start,
371  unsigned int end)
372  : _results(), _output_stream(output_stream), _offset(0), _positive_samples_top1(0), _positive_samples_top5(0)
373 {
374  ARM_COMPUTE_EXIT_ON_MSG(start > end, "Invalid validation range!");
375 
376  std::ifstream ifs;
377  try
378  {
379  ifs.exceptions(std::ifstream::badbit);
380  ifs.open(image_list, std::ios::in | std::ios::binary);
381 
382  // Parse image correctly classified labels
383  unsigned int counter = 0;
384  for(std::string line; !std::getline(ifs, line).fail() && counter <= end; ++counter)
385  {
386  // Add label if within range
387  if(counter >= start)
388  {
389  std::stringstream linestream(line);
390  std::string image_name;
391  int result;
392 
393  linestream >> image_name >> result;
394  _results.emplace_back(result);
395  }
396  }
397  }
398  catch(const std::ifstream::failure &e)
399  {
400  ARM_COMPUTE_ERROR_VAR("Accessing %s: %s", image_list.c_str(), e.what());
401  }
402 }
403 
404 void ValidationOutputAccessor::reset()
405 {
406  _offset = 0;
407  _positive_samples_top1 = 0;
408  _positive_samples_top5 = 0;
409 }
410 
411 bool ValidationOutputAccessor::access_tensor(arm_compute::ITensor &tensor)
412 {
413  bool ret = _offset < _results.size();
414  if(ret)
415  {
416  // Get results
417  std::vector<size_t> tensor_results;
418  switch(tensor.info()->data_type())
419  {
420  case DataType::QASYMM8:
421  tensor_results = access_predictions_tensor<uint8_t>(tensor);
422  break;
423  case DataType::F16:
424  tensor_results = access_predictions_tensor<half>(tensor);
425  break;
426  case DataType::F32:
427  tensor_results = access_predictions_tensor<float>(tensor);
428  break;
429  default:
430  ARM_COMPUTE_ERROR("NOT SUPPORTED!");
431  }
432 
433  // Check if tensor results are within top-n accuracy
434  size_t correct_label = _results[_offset++];
435 
436  aggregate_sample(tensor_results, _positive_samples_top1, 1, correct_label);
437  aggregate_sample(tensor_results, _positive_samples_top5, 5, correct_label);
438  }
439 
440  // Report top_n accuracy
441  if(_offset >= _results.size())
442  {
443  report_top_n(1, _results.size(), _positive_samples_top1);
444  report_top_n(5, _results.size(), _positive_samples_top5);
445  }
446 
447  return ret;
448 }
449 
450 template <typename T>
451 std::vector<size_t> ValidationOutputAccessor::access_predictions_tensor(arm_compute::ITensor &tensor)
452 {
453  // Get the predicted class
454  std::vector<size_t> index;
455 
456  const auto output_net = reinterpret_cast<T *>(tensor.buffer() + tensor.info()->offset_first_element_in_bytes());
457  const size_t num_classes = tensor.info()->dimension(0);
458 
459  index.resize(num_classes);
460 
461  // Sort results
462  std::iota(std::begin(index), std::end(index), static_cast<size_t>(0));
463  std::sort(std::begin(index), std::end(index),
464  [&](size_t a, size_t b)
465  {
466  return output_net[a] > output_net[b];
467  });
468 
469  return index;
470 }
471 
472 void ValidationOutputAccessor::aggregate_sample(const std::vector<size_t> &res, size_t &positive_samples, size_t top_n, size_t correct_label)
473 {
474  auto is_valid_label = [correct_label](size_t label)
475  {
476  return label == correct_label;
477  };
478 
479  if(std::any_of(std::begin(res), std::begin(res) + top_n, is_valid_label))
480  {
481  ++positive_samples;
482  }
483 }
484 
485 void ValidationOutputAccessor::report_top_n(size_t top_n, size_t total_samples, size_t positive_samples)
486 {
487  size_t negative_samples = total_samples - positive_samples;
488  float accuracy = positive_samples / static_cast<float>(total_samples);
489 
490  _output_stream << "----------Top " << top_n << " accuracy ----------" << std::endl
491  << std::endl;
492  _output_stream << "Positive samples : " << positive_samples << std::endl;
493  _output_stream << "Negative samples : " << negative_samples << std::endl;
494  _output_stream << "Accuracy : " << accuracy << std::endl;
495 }
496 
497 DetectionOutputAccessor::DetectionOutputAccessor(const std::string &labels_path, std::vector<TensorShape> &imgs_tensor_shapes, std::ostream &output_stream)
498  : _labels(), _tensor_shapes(std::move(imgs_tensor_shapes)), _output_stream(output_stream)
499 {
500  _labels.clear();
501 
502  std::ifstream ifs;
503 
504  try
505  {
506  ifs.exceptions(std::ifstream::badbit);
507  ifs.open(labels_path, std::ios::in | std::ios::binary);
508 
509  for(std::string line; !std::getline(ifs, line).fail();)
510  {
511  _labels.emplace_back(line);
512  }
513  }
514  catch(const std::ifstream::failure &e)
515  {
516  ARM_COMPUTE_ERROR_VAR("Accessing %s: %s", labels_path.c_str(), e.what());
517  }
518 }
519 
520 template <typename T>
521 void DetectionOutputAccessor::access_predictions_tensor(ITensor &tensor)
522 {
523  const size_t num_detection = tensor.info()->valid_region().shape.y();
524  const auto output_prt = reinterpret_cast<T *>(tensor.buffer() + tensor.info()->offset_first_element_in_bytes());
525 
526  if(num_detection > 0)
527  {
528  _output_stream << "---------------------- Detections ----------------------" << std::endl
529  << std::endl;
530 
531  _output_stream << std::left << std::setprecision(4) << std::setw(8) << "Image | " << std::setw(8) << "Label | " << std::setw(12) << "Confidence | "
532  << "[ xmin, ymin, xmax, ymax ]" << std::endl;
533 
534  for(size_t i = 0; i < num_detection; ++i)
535  {
536  auto im = static_cast<const int>(output_prt[i * 7]);
537  _output_stream << std::setw(8) << im << std::setw(8)
538  << _labels[output_prt[i * 7 + 1]] << std::setw(12) << output_prt[i * 7 + 2]
539  << " [" << (output_prt[i * 7 + 3] * _tensor_shapes[im].x())
540  << ", " << (output_prt[i * 7 + 4] * _tensor_shapes[im].y())
541  << ", " << (output_prt[i * 7 + 5] * _tensor_shapes[im].x())
542  << ", " << (output_prt[i * 7 + 6] * _tensor_shapes[im].y())
543  << "]" << std::endl;
544  }
545  }
546  else
547  {
548  _output_stream << "No detection found." << std::endl;
549  }
550 }
551 
552 bool DetectionOutputAccessor::access_tensor(ITensor &tensor)
553 {
555 
556  switch(tensor.info()->data_type())
557  {
558  case DataType::F32:
559  access_predictions_tensor<float>(tensor);
560  break;
561  default:
562  ARM_COMPUTE_ERROR("NOT SUPPORTED!");
563  }
564 
565  return false;
566 }
567 
568 TopNPredictionsAccessor::TopNPredictionsAccessor(const std::string &labels_path, size_t top_n, std::ostream &output_stream)
569  : _labels(), _output_stream(output_stream), _top_n(top_n)
570 {
571  _labels.clear();
572 
573  std::ifstream ifs;
574 
575  try
576  {
577  ifs.exceptions(std::ifstream::badbit);
578  ifs.open(labels_path, std::ios::in | std::ios::binary);
579 
580  for(std::string line; !std::getline(ifs, line).fail();)
581  {
582  _labels.emplace_back(line);
583  }
584  }
585  catch(const std::ifstream::failure &e)
586  {
587  ARM_COMPUTE_ERROR_VAR("Accessing %s: %s", labels_path.c_str(), e.what());
588  }
589 }
590 
591 template <typename T>
592 void TopNPredictionsAccessor::access_predictions_tensor(ITensor &tensor)
593 {
594  // Get the predicted class
595  std::vector<T> classes_prob;
596  std::vector<size_t> index;
597 
598  const auto output_net = reinterpret_cast<T *>(tensor.buffer() + tensor.info()->offset_first_element_in_bytes());
599  const size_t num_classes = tensor.info()->dimension(0);
600 
601  classes_prob.resize(num_classes);
602  index.resize(num_classes);
603 
604  std::copy(output_net, output_net + num_classes, classes_prob.begin());
605 
606  // Sort results
607  std::iota(std::begin(index), std::end(index), static_cast<size_t>(0));
608  std::sort(std::begin(index), std::end(index),
609  [&](size_t a, size_t b)
610  {
611  return classes_prob[a] > classes_prob[b];
612  });
613 
614  _output_stream << "---------- Top " << _top_n << " predictions ----------" << std::endl
615  << std::endl;
616  for(size_t i = 0; i < _top_n; ++i)
617  {
618  _output_stream << std::fixed << std::setprecision(4)
619  << +classes_prob[index.at(i)]
620  << " - [id = " << index.at(i) << "]"
621  << ", " << _labels[index.at(i)] << std::endl;
622  }
623 }
624 
625 bool TopNPredictionsAccessor::access_tensor(ITensor &tensor)
626 {
627  ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&tensor, 1, DataType::QASYMM8, DataType::F32);
628  ARM_COMPUTE_ERROR_ON(_labels.size() != tensor.info()->dimension(0));
629 
630  switch(tensor.info()->data_type())
631  {
632  case DataType::QASYMM8:
633  access_predictions_tensor<uint8_t>(tensor);
634  break;
635  case DataType::F32:
636  access_predictions_tensor<float>(tensor);
637  break;
638  default:
639  ARM_COMPUTE_ERROR("NOT SUPPORTED!");
640  }
641 
642  return false;
643 }
644 
645 RandomAccessor::RandomAccessor(PixelValue lower, PixelValue upper, std::random_device::result_type seed)
646  : _lower(lower), _upper(upper), _seed(seed)
647 {
648 }
649 
650 template <typename T, typename D>
651 void RandomAccessor::fill(ITensor &tensor, D &&distribution)
652 {
653  std::mt19937 gen(_seed);
654 
655  if(tensor.info()->padding().empty() && (dynamic_cast<SubTensor *>(&tensor) == nullptr))
656  {
657  for(size_t offset = 0; offset < tensor.info()->total_size(); offset += tensor.info()->element_size())
658  {
659  const auto value = static_cast<T>(distribution(gen));
660  *reinterpret_cast<T *>(tensor.buffer() + offset) = value;
661  }
662  }
663  else
664  {
665  // If tensor has padding accessing tensor elements through execution window.
666  Window window;
667  window.use_tensor_dimensions(tensor.info()->tensor_shape());
668 
669  execute_window_loop(window, [&](const Coordinates & id)
670  {
671  const auto value = static_cast<T>(distribution(gen));
672  *reinterpret_cast<T *>(tensor.ptr_to_element(id)) = value;
673  });
674  }
675 }
676 
677 bool RandomAccessor::access_tensor(ITensor &tensor)
678 {
679  switch(tensor.info()->data_type())
680  {
681  case DataType::QASYMM8:
682  case DataType::U8:
683  {
684  std::uniform_int_distribution<uint8_t> distribution_u8(_lower.get<uint8_t>(), _upper.get<uint8_t>());
685  fill<uint8_t>(tensor, distribution_u8);
686  break;
687  }
688  case DataType::S8:
689  {
690  std::uniform_int_distribution<int8_t> distribution_s8(_lower.get<int8_t>(), _upper.get<int8_t>());
691  fill<int8_t>(tensor, distribution_s8);
692  break;
693  }
694  case DataType::U16:
695  {
696  std::uniform_int_distribution<uint16_t> distribution_u16(_lower.get<uint16_t>(), _upper.get<uint16_t>());
697  fill<uint16_t>(tensor, distribution_u16);
698  break;
699  }
700  case DataType::S16:
701  {
702  std::uniform_int_distribution<int16_t> distribution_s16(_lower.get<int16_t>(), _upper.get<int16_t>());
703  fill<int16_t>(tensor, distribution_s16);
704  break;
705  }
706  case DataType::U32:
707  {
708  std::uniform_int_distribution<uint32_t> distribution_u32(_lower.get<uint32_t>(), _upper.get<uint32_t>());
709  fill<uint32_t>(tensor, distribution_u32);
710  break;
711  }
712  case DataType::S32:
713  {
714  std::uniform_int_distribution<int32_t> distribution_s32(_lower.get<int32_t>(), _upper.get<int32_t>());
715  fill<int32_t>(tensor, distribution_s32);
716  break;
717  }
718  case DataType::U64:
719  {
720  std::uniform_int_distribution<uint64_t> distribution_u64(_lower.get<uint64_t>(), _upper.get<uint64_t>());
721  fill<uint64_t>(tensor, distribution_u64);
722  break;
723  }
724  case DataType::S64:
725  {
726  std::uniform_int_distribution<int64_t> distribution_s64(_lower.get<int64_t>(), _upper.get<int64_t>());
727  fill<int64_t>(tensor, distribution_s64);
728  break;
729  }
730  case DataType::F16:
731  {
732  std::uniform_real_distribution<float> distribution_f16(_lower.get<half>(), _upper.get<half>());
733  fill<half>(tensor, distribution_f16);
734  break;
735  }
736  case DataType::F32:
737  {
738  std::uniform_real_distribution<float> distribution_f32(_lower.get<float>(), _upper.get<float>());
739  fill<float>(tensor, distribution_f32);
740  break;
741  }
742  case DataType::F64:
743  {
744  std::uniform_real_distribution<double> distribution_f64(_lower.get<double>(), _upper.get<double>());
745  fill<double>(tensor, distribution_f64);
746  break;
747  }
748  default:
749  ARM_COMPUTE_ERROR("NOT SUPPORTED!");
750  }
751  return true;
752 }
753 
754 NumPyBinLoader::NumPyBinLoader(std::string filename, DataLayout file_layout)
755  : _already_loaded(false), _filename(std::move(filename)), _file_layout(file_layout)
756 {
757 }
758 
759 bool NumPyBinLoader::access_tensor(ITensor &tensor)
760 {
761  if(!_already_loaded)
762  {
763  utils::NPYLoader loader;
764  loader.open(_filename, _file_layout);
765  loader.fill_tensor(tensor);
766  }
767 
768  _already_loaded = !_already_loaded;
769  return _already_loaded;
770 }
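
The accessors defined above implement the graph utilities' tensor-accessor interface, so they can also be exercised directly on a standalone tensor outside of a graph. The snippet below is a minimal sketch, not part of GraphUtils.cpp: it fills a freshly allocated tensor using RandomAccessor as defined in this file. The header paths and the PixelValue float constructor are assumed to match this release of the Compute Library.

// Minimal usage sketch (illustrative, not part of GraphUtils.cpp)
#include "arm_compute/runtime/Tensor.h"
#include "utils/GraphUtils.h"

int main()
{
    using namespace arm_compute;
    using namespace arm_compute::graph_utils;

    // Describe and allocate a small F32 tensor; the shape is purely illustrative
    Tensor tensor;
    tensor.allocator()->init(TensorInfo(TensorShape(8U, 8U, 3U), 1, DataType::F32));
    tensor.allocator()->allocate();

    // access_tensor() dispatches on the tensor's data type and fills it with
    // uniform random values in [-1, 1]
    RandomAccessor random_accessor(PixelValue(-1.f), PixelValue(1.f));
    random_accessor.access_tensor(tensor);

    return 0;
}

In the graph examples these accessors are normally handed to the input and output nodes of a stream rather than being called directly as shown here.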