Compute Library
 23.11
DependencyGraph.h
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2022 Arm Limited.
3  *
4  * SPDX-License-Identifier: MIT
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a copy
7  * of this software and associated documentation files (the "Software"), to
8  * deal in the Software without restriction, including without limitation the
9  * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10  * sell copies of the Software, and to permit persons to whom the Software is
11  * furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in all
14  * copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22  * SOFTWARE.
23  */
24 #ifndef SRC_DYNAMIC_FUSION_SKETCH_UTILS_DEPENDENCYGRAPH
25 #define SRC_DYNAMIC_FUSION_SKETCH_UTILS_DEPENDENCYGRAPH
26 
#include "arm_compute/core/Error.h"

#include <algorithm>
#include <cstdint>
#include <iosfwd>
#include <map>
#include <set>
#include <tuple>
#include <vector>
34 
35 namespace arm_compute
36 {
37 namespace experimental
38 {
39 namespace dynamic_fusion
40 {
namespace
{
/** Check whether @p v occurs in @p vec (linear scan). */
template <typename T>
bool is_in(const T &v, const std::vector<T> &vec)
{
    for (const auto &element : vec)
    {
        if (element == v)
        {
            return true;
        }
    }
    return false;
}
} // namespace
49 
50 /** A multi-input (tensors), multi-output (tensors) acyclic directed graph
51  * Represented as a doubly-linked adjacency list with the differentiation between source and destination
52  */
54 {
55 public:
56  using Id = int32_t;
57  using TensorId = Id;
58  using OperatorId = Id;
59  /** Adjacency list
60  *
61  */
62  using AdjList = std::map<Id, std::vector<Id>>;
63 
64  /** A pack of operator including its input and output tensors, used by traversing through the graph in topological order
65  *
66  */
67  struct OpPack
68  {
70  std::vector<TensorId> inputs{};
71  std::vector<TensorId> outputs{};
72  friend bool operator==(const OpPack &opp0, const OpPack &opp1)
73  {
74  return std::make_tuple(opp0.op, opp0.inputs, opp0.outputs) ==
75  std::make_tuple(opp1.op, opp1.inputs, opp1.outputs);
76  }
77  };
78 
79 public:
80  DependencyGraph() = default;
81  friend std::ostream &operator<<(std::ostream &os, const DependencyGraph &);
82 
83  /** Try adding an operator (without actually adding it), while keeping the graph as a "linear sequence" / list
84  *
85  * Rule: If the new operator is not the first operator, at least one input tensor must be
86  * the output tensor of the last non-output operator. All other input tensors must be
87  * the global input of the graph (i.e. not the output of any operator).
88  *
89  * Rule: The output tensor of the new operator must not be the input tensor of any previously
90  * added operator.
91  *
92  * PRECONDITION: The current graph is already linear
93  *
94  * @return true If the operator can be added while keeping the graph as a linear sequence
95  * @return false Otherwise
96  */
98  const std::vector<TensorId> &inputs,
99  const std::vector<TensorId> &outputs,
100  bool is_output = false) const
101  {
102  ARM_COMPUTE_UNUSED(op, is_output);
103  if (all_ops().empty())
104  {
105  return true;
106  }
107 
108  // If the new operator is not the first operator, at least one input tensor must be
109  // the output tensor of the last non-output operator. All other input tensors must be
110  // the global input of the graph (i.e. not the output of any operator).
111  if (_last_op_available)
112  {
113  auto use_input_from_last_op = false;
114 
115  for (auto src_tensor : inputs)
116  {
117  const auto src_ops = _adj_src_ops.find(src_tensor);
118 
119  if (src_ops != _adj_src_ops.end())
120  {
121  ARM_COMPUTE_ERROR_ON(src_ops->second.size() > 1);
122 
123  if (!src_ops->second.empty())
124  {
125  const auto src_op = src_ops->second[0];
126 
127  if (src_op == _last_op)
128  {
129  if (use_input_from_last_op)
130  {
131  // To be safe, we also forbid using the output tensor
132  // of the last operator twice.
133  return false;
134  }
135 
136  use_input_from_last_op = true;
137  }
138  else
139  {
140  // The input tensor of this operator must not be the output tensor
141  // of any other operator except the last non-output operator.
142  return false;
143  }
144  }
145  }
146  }
147 
148  if (!use_input_from_last_op)
149  {
150  // At least one input tensor must be the output tensor of the last non-output operator.
151  return false;
152  }
153  }
154 
155  // The output tensor of the new operator must not be the input tensor of any previously
156  // added operator.
157  for (auto dst_tensor : outputs)
158  {
159  if (_adj_dst_ops.find(dst_tensor) != _adj_dst_ops.end())
160  {
161  return false;
162  }
163  }
164 
165  return true;
166  }
167  /** Add an operator, while keeping the graph as a "linear sequence"
168  *
169  * PRECONDITION: The current graph is already linear
170  * INVARIANT: The list can only grow from head to tail
171  * INVARIANT: POSTCONDITION: The graph is linear
172  */
174  const std::vector<TensorId> &inputs,
175  const std::vector<TensorId> &outputs,
176  bool is_output = false)
177  {
178  const auto success = add_operator(op, inputs, outputs, is_output);
179  ARM_COMPUTE_UNUSED(success);
180  ARM_COMPUTE_ERROR_ON(!success);
181  }
182  /** Add a new operator
183  * Return invalid if it violates the DAG invariant
184  * Invalid operation will not change the graph
185  *
186  * @param[in] op Operator to add
187  * @param[in] inputs Input tensors to the operator
188  * @param[in] outputs Output tensors to the operator
189  * @param[in] is_output Whether this is an output operator
190  */
192  const std::vector<TensorId> &inputs,
193  const std::vector<TensorId> &outputs,
194  bool is_output = false)
195  {
196  if (operator_exists(op))
197  {
198  return false;
199  }
200  _adj_src_tensors[op] = {};
201  _adj_dst_tensors[op] = {};
202  for (auto in_tensor : inputs)
203  {
204  // Linking input tensor to operator node will never create a cycle / loop because we guarantee
205  // each op is newly created, so every <input, op> pair / edge is new
206  link_input(op, in_tensor);
207  }
208  for (auto out_tensor : outputs)
209  {
210  // If there exists a back path from op's output tensor to op already, then linking the two will create a loop / cycle
211  if (path_exists_from_tensor_to_op(out_tensor, op))
212  {
213  remove_operator(op);
214  return false;
215  }
216  else
217  {
218  link_output(op, out_tensor);
219  }
220  }
221 
222  if (!is_output)
223  {
224  _last_op_available = true;
225  _last_op = op;
226  }
227 
228  return true;
229  }
230 
231  /** Build a sequence of operators from the acyclic graph of operators.
232  *
233  * The graph will be visited in depth-first strategy. The operator can only be added to
234  * the sequence when all operators that supply the input tensors have been added. Otherwise,
235  * the operator will be ignored and later visited again. In other words, the dependency between
236  * operators will be preserved in the sequence.
237  */
238  std::vector<OpPack> build_operators_sequence() const
239  {
240  std::vector<OpPack> ops_seq;
241  std::set<Id> done_ops;
242  std::set<Id> done_tensors;
243 
244  const auto input_tensors = global_src_tensors();
245 
246  for (auto tensor : input_tensors)
247  {
248  done_tensors.insert(tensor);
249 
250  for (auto op : _adj_dst_ops.at(tensor))
251  {
252  build_operators_sequence_from_op(op, ops_seq, done_ops, done_tensors);
253  }
254  }
255 
256  return ops_seq;
257  }
258 
259  /** Strict equality comparison (all internal ids and order of insertion matter).
260  * In the future this may be replaced with a topological comparison, allowing equivalent graphs with different internal ids to be equal
261  *
262  *
263  * @param[in] g0
264  * @param[in] g1
265  * @return true If the same
266  * @return false Otherwise
267  */
268  friend bool operator==(const DependencyGraph &g0, const DependencyGraph &g1)
269  {
270  // Do not compare id allocators
271  return std::make_tuple(g0._adj_src_tensors, g0._adj_dst_tensors, g0._adj_src_ops, g0._adj_dst_ops) ==
272  std::make_tuple(g1._adj_src_tensors, g1._adj_dst_tensors, g1._adj_src_ops, g1._adj_dst_ops);
273  }
274  std::vector<OperatorId> src_ops_from_tensor(TensorId tensor) const
275  {
276  return _adj_src_ops.at(tensor);
277  }
278  std::vector<OperatorId> dst_ops_from_tensor(TensorId tensor) const
279  {
280  return _adj_dst_ops.at(tensor);
281  }
282  /** Get all tensors
283  *
284  * @return std::vector<TensorId>
285  */
286  std::vector<TensorId> all_tensors() const
287  {
288  std::vector<TensorId> tensors{};
289  std::transform(std::begin(_adj_src_ops), std::end(_adj_src_ops), std::back_inserter(tensors),
290  [](const auto &it) { return it.first; });
291  return tensors;
292  }
293  /** Get source tensors of the whole graph
294  *
295  * @return std::vector<TensorId>
296  */
297  std::vector<TensorId> global_src_tensors() const
298  {
299  std::vector<TensorId> tensors;
300  for (auto tensor_src_ops : _adj_src_ops)
301  {
302  if (tensor_src_ops.second.empty())
303  {
304  tensors.push_back(tensor_src_ops.first);
305  }
306  }
307  return tensors;
308  }
309  /** Get destination tensors of the whole graph
310  *
311  * @return std::vector<TensorId>
312  */
313  std::vector<TensorId> global_dst_tensors() const
314  {
315  std::vector<TensorId> tensors;
316  for (auto tensor_dst_ops : _adj_dst_ops)
317  {
318  if (tensor_dst_ops.second.empty())
319  {
320  tensors.push_back(tensor_dst_ops.first);
321  }
322  }
323  return tensors;
324  }
325  /** Get intermediate tensors of the whole graph.
326  *
327  * @return std::vector<TensorId>
328  */
329  std::vector<TensorId> intermediate_tensors() const
330  {
331  std::vector<TensorId> tensors;
332 
333  // If a tensor is used to connect the input of an operator and the output of another operator,
334  // it is not allocated in the memory. The tensor exists as a temporary variable only.
335  for (auto src_tensor : _adj_src_ops)
336  {
337  if (!src_tensor.second.empty())
338  {
339  const auto dst_tensor = _adj_dst_ops.find(src_tensor.first);
340  if (dst_tensor != _adj_dst_ops.end())
341  {
342  if (!dst_tensor->second.empty())
343  {
344  tensors.push_back(src_tensor.first);
345  }
346  }
347  }
348  }
349 
350  return tensors;
351  }
352  /** Get all root ops. Root ops can also be referred to as "src ops" of the whole graph
353  *
354  * @return std::vector<OperatorId>
355  */
356  std::vector<OperatorId> get_root_ops() const
357  {
358  std::vector<OperatorId> ops{};
359  const auto op_list = all_ops();
360 
361  for (auto op : op_list)
362  {
363  if (src_ops(op).empty())
364  {
365  ops.emplace_back(op);
366  }
367  }
368  return ops;
369  }
370 
private:
    /** Record @p in_tensor as an input of @p op, creating the tensor node on demand. */
    void link_input(OperatorId op, TensorId in_tensor)
    {
        ARM_COMPUTE_ERROR_ON(!operator_exists(op));
        if (!tensor_exists(in_tensor))
        {
            insert_new_tensor(in_tensor);
        }
        ARM_COMPUTE_ERROR_ON(are_connected(op, in_tensor)); // Prevent repetitive linking: each <tensor, op> edge may exist only once
        _adj_src_tensors[op].push_back(in_tensor);
        _adj_dst_ops[in_tensor].push_back(op);
    }
    /** Record @p out_tensor as an output of @p op, creating the tensor node on demand. */
    void link_output(OperatorId op, TensorId out_tensor)
    {
        ARM_COMPUTE_ERROR_ON(!operator_exists(op));
        if (!tensor_exists(out_tensor))
        {
            insert_new_tensor(out_tensor);
        }
        ARM_COMPUTE_ERROR_ON(are_connected(op, out_tensor)); // Prevent repetitive linking: each <tensor, op> edge may exist only once
        _adj_dst_tensors[op].push_back(out_tensor);
        _adj_src_ops[out_tensor].push_back(op);
    }
394 
395  std::vector<OperatorId> src_ops(OperatorId op) const
396  {
397  ARM_COMPUTE_ERROR_ON(!operator_exists(op));
398  std::vector<OperatorId> ops{};
399  for (TensorId src_tensor : src_tensors(op))
400  {
401  ops.insert(ops.end(), std::begin(_adj_src_ops.at(src_tensor)), std::end(_adj_src_ops.at(src_tensor)));
402  }
403  return ops;
404  }
405  std::vector<OperatorId> dst_ops(OperatorId op) const
406  {
407  ARM_COMPUTE_ERROR_ON(!operator_exists(op));
408  std::vector<OperatorId> ops{};
409  for (TensorId dst_tensor : _adj_dst_tensors.at(op))
410  {
411  ops.insert(ops.end(), std::begin(_adj_dst_ops.at(dst_tensor)), std::end(_adj_dst_ops.at(dst_tensor)));
412  }
413  return ops;
414  }
415 
    /** Get source tensors to an operator
     *
     * @param[in] op Operator whose input tensors are queried
     * @return std::vector<TensorId> Copy of the operator's input tensor id list
     */
    std::vector<TensorId> src_tensors(OperatorId op) const
    {
        ARM_COMPUTE_ERROR_ON(!operator_exists(op));
        return _adj_src_tensors.at(op);
    }
    /** Get destination tensors to an operator
     *
     * @param[in] op Operator whose output tensors are queried
     * @return std::vector<TensorId> Copy of the operator's output tensor id list
     */
    std::vector<TensorId> dst_tensors(OperatorId op) const
    {
        ARM_COMPUTE_ERROR_ON(!operator_exists(op));
        return _adj_dst_tensors.at(op);
    }
436  /** Get all operators
437  *
438  * @return std::vector<OperatorId>
439  */
440  std::vector<OperatorId> all_ops() const
441  {
442  std::vector<OperatorId> ops{};
443  std::transform(std::begin(_adj_src_tensors), std::end(_adj_src_tensors), std::back_inserter(ops),
444  [](const auto &it) { return it.first; });
445  return ops;
446  }
447  /** Remove an operator from graph.
448  *
449  * @param[in] op
450  */
451  void remove_operator(OperatorId op)
452  {
453  for (auto src_tensor : _adj_src_tensors.at(op))
454  {
455  auto &dst_ops = _adj_dst_ops.at(src_tensor);
456  dst_ops.erase(std::remove(std::begin(dst_ops), std::end(dst_ops), op), std::end(dst_ops));
457  }
458  for (auto dst_tensor : _adj_dst_tensors.at(op))
459  {
460  auto &src_ops = _adj_src_ops.at(dst_tensor);
461  src_ops.erase(std::remove(std::begin(src_ops), std::end(src_ops), op), std::end(src_ops));
462  }
463  // Remove any isolated tensors
464  // An isolated tensor is one where both its _adj_src_ops and _adj_dst_ops are empty
465  for (auto t : all_tensors())
466  {
467  if (_adj_src_ops.at(t).empty() && _adj_dst_ops.at(t).empty())
468  {
469  _adj_src_ops.erase(t);
470  _adj_dst_ops.erase(t);
471  }
472  }
473  _adj_src_tensors.erase(op);
474  _adj_dst_tensors.erase(op);
475  }
476  void insert_new_tensor(TensorId tensor)
477  {
478  _adj_src_ops[tensor] = {};
479  _adj_dst_ops[tensor] = {};
480  }
481  bool tensor_exists(TensorId tensor) const
482  {
483  return _adj_src_ops.find(tensor) != _adj_src_ops.end() && _adj_dst_ops.find(tensor) != _adj_dst_ops.end();
484  }
485  bool operator_exists(OperatorId op) const
486  {
487  return _adj_src_tensors.find(op) != _adj_src_tensors.end() &&
488  _adj_dst_tensors.find(op) != _adj_dst_tensors.end();
489  }
490  bool is_src_tensor_of(OperatorId op, TensorId tensor) const
491  {
492  if (!operator_exists(op) || !tensor_exists(tensor))
493  {
494  return false;
495  }
496  const auto op_inputs = src_tensors(op);
497  return std::find(op_inputs.begin(), op_inputs.end(), tensor) != op_inputs.end();
498  }
499  bool is_dst_tensor_of(OperatorId op, TensorId tensor) const
500  {
501  if (!operator_exists(op) || !tensor_exists(tensor))
502  {
503  return false;
504  }
505  const auto op_outputs = dst_tensors(op);
506  return std::find(op_outputs.begin(), op_outputs.end(), tensor) != op_outputs.end();
507  }
508  bool are_connected(OperatorId op, TensorId tensor) const
509  {
510  return is_src_tensor_of(op, tensor) || is_dst_tensor_of(op, tensor);
511  }
512  /** If op is the destination / leaf operator of the whole graph
513  *
514  * @param[in] op
515  * @return true
516  * @return false
517  */
518  bool is_dst_op(OperatorId op) const
519  {
520  return dst_ops(op).empty();
521  }
522  std::vector<OperatorId> get_dst_ops() const
523  {
524  std::vector<OperatorId> ops{};
525  const auto op_list = all_ops();
526 
527  for (auto op : op_list)
528  {
529  if (is_dst_op(op))
530  {
531  ops.emplace_back(op);
532  }
533  }
534  return ops;
535  }
    /** Whether some operator reachable downstream of @p src_tensor is @p dst_op.
     * Used by add_operator() to detect whether a new edge would create a cycle.
     */
    bool path_exists_from_tensor_to_op(TensorId src_tensor, OperatorId dst_op) const
    {
        // Unknown endpoints trivially have no path between them
        if (!tensor_exists(src_tensor) || !operator_exists(dst_op))
        {
            return false;
        }
        // Recurse through every operator that consumes src_tensor
        for (auto child_op : dst_ops_from_tensor(src_tensor))
        {
            if (path_exists_from_op_to_op(child_op, dst_op))
            {
                return true;
            }
        }
        return false;
    }

    /** Whether a directed path leads from @p src_op to @p dst_op
     * (mutually recursive with the tensor-to-op check in this block).
     */
    bool path_exists_from_op_to_op(OperatorId src_op, OperatorId dst_op) const
    {
        if (!operator_exists(src_op) || !operator_exists(dst_op))
        {
            return false;
        }
        if (src_op == dst_op)
        {
            return true;
        }
        // A leaf operator cannot reach anything further downstream
        if (is_in(src_op, get_dst_ops()))
        {
            return false;
        }
        for (auto child_tensor : dst_tensors(src_op))
        {
            if (path_exists_from_tensor_to_op(child_tensor, dst_op))
            {
                return true;
            }
        }
        return false;
    }
575 
    /** Depth-first visit that appends @p op (and, transitively, its ready sinks) to @p ops_seq.
     *
     * An operator is appended only once all of its input tensors are in @p done_tensors;
     * otherwise the visit returns early and the operator is retried when another of its
     * producers finishes. The single-sink case is handled iteratively (the while-loop
     * re-assigns `op`) rather than recursively, limiting recursion depth on long chains.
     */
    void build_operators_sequence_from_op(Id op,
                                          std::vector<OpPack> &ops_seq,
                                          std::set<Id>        &done_ops,
                                          std::set<Id>        &done_tensors) const
    {
        while (true)
        {
            // If the operator has been added to the sequence, ignore it.
            if (done_ops.find(op) != done_ops.end())
            {
                return;
            }

            // If not all the input tensors of the operator are available, this operator cannot be
            // added to the sequence for now. It will be visited again after the source operator
            // is added to the sequence.
            const auto src_tensors = _adj_src_tensors.at(op);

            for (auto src : src_tensors)
            {
                if (done_tensors.find(src) == done_tensors.end())
                {
                    return;
                }
            }

            // This operator is ready to be added to the sequence.
            const auto dst_tensors = _adj_dst_tensors.at(op);

            done_ops.insert(op);

            OpPack pack{op, src_tensors, dst_tensors};
            ops_seq.push_back(pack);

            // The operator's outputs become available to downstream consumers
            done_tensors.insert(dst_tensors.begin(), dst_tensors.end());

            // Visit all the sink operators.
            // Call this function recursively unless there is only one sink,
            // in which case simply advance `op` and loop again.
            if (dst_tensors.size() == 1 && _adj_dst_ops.at(dst_tensors[0]).size() == 1)
            {
                op = _adj_dst_ops.at(dst_tensors[0])[0];
            }
            else
            {
                for (auto dst_tensor : dst_tensors)
                {
                    const auto dst_ops = _adj_dst_ops.at(dst_tensor);

                    for (auto dst_op : dst_ops)
                    {
                        build_operators_sequence_from_op(dst_op, ops_seq, done_ops, done_tensors);
                    }
                }

                return;
            }
        }
    }
634 
private:
    // Doubly-linked adjacency lists:
    AdjList _adj_src_tensors{}; // operator id -> ids of its input tensors (filled by link_input)
    AdjList _adj_dst_tensors{}; // operator id -> ids of its output tensors (filled by link_output)
    AdjList _adj_src_ops{};     // tensor id -> ids of operators producing it
    AdjList _adj_dst_ops{};     // tensor id -> ids of operators consuming it

    bool       _last_op_available{false}; // Whether _last_op holds a valid id (set when a non-output operator is added)
    OperatorId _last_op{0};               // Most recently added non-output operator; used by the linear-sequence checks
};
644 
645 } // namespace dynamic_fusion
646 } // namespace experimental
647 } // namespace arm_compute
648 #endif /* SRC_DYNAMIC_FUSION_SKETCH_UTILS_DEPENDENCYGRAPH */
arm_compute::experimental::dynamic_fusion::DependencyGraph::try_add_operator_as_linear
bool try_add_operator_as_linear(OperatorId op, const std::vector< TensorId > &inputs, const std::vector< TensorId > &outputs, bool is_output=false) const
Try adding an operator (without actually adding it), while keeping the graph as a "linear sequence" /...
Definition: DependencyGraph.h:97
arm_compute::test::validation::src
SimpleTensor< float > src
Definition: DFT.cpp:155
arm_compute::experimental::dynamic_fusion::DependencyGraph::get_root_ops
std::vector< OperatorId > get_root_ops() const
Get all root ops.
Definition: DependencyGraph.h:356
arm_compute::experimental::dynamic_fusion::DependencyGraph::Id
int32_t Id
Definition: DependencyGraph.h:56
arm_compute::experimental::dynamic_fusion::DependencyGraph::OperatorId
Id OperatorId
Definition: DependencyGraph.h:58
arm_compute::experimental::dynamic_fusion::DependencyGraph::DependencyGraph
DependencyGraph()=default
arm_compute::experimental::dynamic_fusion::DependencyGraph::add_operator_as_linear
void add_operator_as_linear(OperatorId op, const std::vector< TensorId > &inputs, const std::vector< TensorId > &outputs, bool is_output=false)
Add an operator, while keeping the graph as a "linear sequence".
Definition: DependencyGraph.h:173
arm_compute::experimental::dynamic_fusion::DependencyGraph::operator==
friend bool operator==(const DependencyGraph &g0, const DependencyGraph &g1)
Strict equality comparison (all internal ids and order of insertion matter).
Definition: DependencyGraph.h:268
arm_compute::experimental::dynamic_fusion::DependencyGraph::src_ops_from_tensor
std::vector< OperatorId > src_ops_from_tensor(TensorId tensor) const
Definition: DependencyGraph.h:274
arm_compute::experimental::dynamic_fusion::DependencyGraph::all_tensors
std::vector< TensorId > all_tensors() const
Get all tensors.
Definition: DependencyGraph.h:286
Error.h
arm_compute::experimental::dynamic_fusion::DependencyGraph::TensorId
Id TensorId
Definition: DependencyGraph.h:57
arm_compute::experimental::dynamic_fusion::DependencyGraph
A multi-input (tensors), multi-output (tensors) acyclic directed graph Represented as a doubly-linked...
Definition: DependencyGraph.h:53
arm_compute::experimental::dynamic_fusion::DependencyGraph::dst_ops_from_tensor
std::vector< OperatorId > dst_ops_from_tensor(TensorId tensor) const
Definition: DependencyGraph.h:278
ARM_COMPUTE_ERROR_ON
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
Definition: Error.h:466
arm_compute::experimental::dynamic_fusion::DependencyGraph::add_operator
bool add_operator(OperatorId op, const std::vector< TensorId > &inputs, const std::vector< TensorId > &outputs, bool is_output=false)
Add a new operator Return invalid if it violates the DAG invariant Invalid operation will not change ...
Definition: DependencyGraph.h:191
arm_compute::experimental::dynamic_fusion::DependencyGraph::global_src_tensors
std::vector< TensorId > global_src_tensors() const
Get source tensors of the whole graph.
Definition: DependencyGraph.h:297
arm_compute::experimental::dynamic_fusion::DependencyGraph::build_operators_sequence
std::vector< OpPack > build_operators_sequence() const
Build a sequence of operators from the acyclic graph of operators.
Definition: DependencyGraph.h:238
arm_compute::experimental::dynamic_fusion::OperatorId
DependencyGraph::OperatorId OperatorId
Definition: GpuOperatorGroup.h:41
arm_compute::experimental::dynamic_fusion::DependencyGraph::OpPack::inputs
std::vector< TensorId > inputs
Definition: DependencyGraph.h:70
arm_compute::experimental::dynamic_fusion::DependencyGraph::operator<<
friend std::ostream & operator<<(std::ostream &os, const DependencyGraph &)
arm_compute::test::validation::pack
ITensorPack pack
Definition: Im2Col.cpp:188
ARM_COMPUTE_UNUSED
#define ARM_COMPUTE_UNUSED(...)
To avoid unused variables warnings.
Definition: Error.h:151
tensor
CLTensor * tensor
Pointer to the auxiliary tensor.
Definition: ClWorkloadRuntime.cpp:67
arm_compute::utils::is_in
bool is_in(E check, std::initializer_list< E > list)
Check if the given value is in the given enum value list.
Definition: Utils.h:75
arm_compute::experimental::dynamic_fusion::DependencyGraph::OpPack::operator==
friend bool operator==(const OpPack &opp0, const OpPack &opp1)
Definition: DependencyGraph.h:72
arm_compute::experimental::dynamic_fusion::DependencyGraph::AdjList
std::map< Id, std::vector< Id > > AdjList
Adjacency list.
Definition: DependencyGraph.h:62
arm_compute::experimental::dynamic_fusion::DependencyGraph::OpPack
A pack of operator including its input and output tensors, used by traversing through the graph in to...
Definition: DependencyGraph.h:67
arm_compute
Copyright (c) 2017-2023 Arm Limited.
Definition: introduction.dox:24
arm_compute::mlgo::parser::end
void end(TokenStream &in, bool &valid)
Definition: MLGOParser.cpp:283
arm_compute::experimental::dynamic_fusion::DependencyGraph::intermediate_tensors
std::vector< TensorId > intermediate_tensors() const
Get intermediate tensors of the whole graph.
Definition: DependencyGraph.h:329
arm_compute::experimental::dynamic_fusion::DependencyGraph::global_dst_tensors
std::vector< TensorId > global_dst_tensors() const
Get destination tensors of the whole graph.
Definition: DependencyGraph.h:313
arm_compute::experimental::dynamic_fusion::DependencyGraph::OpPack::op
OperatorId op
Definition: DependencyGraph.h:69
tf_frozen_model_extractor.t
t
Definition: tf_frozen_model_extractor.py:49
arm_compute::experimental::dynamic_fusion::DependencyGraph::OpPack::outputs
std::vector< TensorId > outputs
Definition: DependencyGraph.h:71