Compute Library 21.11
arm_compute::graph::detail Namespace Reference

Functions

void configure_transition_manager (Graph &g, GraphContext &ctx, ExecutionWorkload &workload)
 Configures transition manager and execution workload. More...
 
void validate_all_nodes (Graph &g)
 Validates all nodes. More...
 
void configure_all_tensors (Graph &g)
 Configures all tensors of a graph. More...
 
void allocate_all_input_tensors (INode &node)
 Allocates all input tensors of a node. More...
 
void allocate_all_output_tensors (INode &node)
 Allocates all output tensors of a node. More...
 
void allocate_const_tensors (Graph &g)
 Allocates const tensors of a given graph. More...
 
void allocate_all_tensors (Graph &g)
 Allocates all tensors of a graph. More...
 
ExecutionWorkload configure_all_nodes (Graph &g, GraphContext &ctx, const std::vector< NodeID > &node_order)
 Configures all nodes of a graph. More...
 
void release_unused_tensors (Graph &g)
 Release the memory of all unused const nodes. More...
 
void call_tensor_accessor (Tensor *tensor)
 Calls accessor of a given tensor. More...
 
void call_all_const_node_accessors (Graph &g)
 Call all const node accessors. More...
 
bool call_all_input_node_accessors (ExecutionWorkload &workload)
 Call all input node accessors. More...
 
bool call_all_output_node_accessors (ExecutionWorkload &workload)
 Call all output node accessors. More...
 
void prepare_all_tasks (ExecutionWorkload &workload)
 Prepares all tasks for execution. More...
 
void call_all_tasks (ExecutionWorkload &workload)
 Executes all tasks of a workload. More...
 
bool all_inputs_are_visited (const INode *node, const std::vector< bool > &visited)
 Checks if all the input dependencies of a node have been visited. More...
 
void transfer_driving_nodes_and_remove_old_node (Graph &g, INode *new_node, INode *old_node, bool add_output_tensor)
 
void fuse_convolution_with_batch_normalization (Graph &g, const Edge *output_edge)
 
void fuse_depthwise_convolution_with_batch_normalization (Graph &g, const Edge *output_edge)
 
template<typename N >
void fuse_node_with_activation (Graph &g, const Edge *output_edge, const std::set< Activation > &supported_fused_activations)
 
template<typename N >
void fuse_pad_with_convolution (Graph &g, const Edge *output_edge)
 
template<typename N1 , typename N2 , typename F , typename... Args>
void fuse_layer (Graph &g, std::function< bool(INode &)> const &prec, const F fuse_fcn, Args &&... optional_arguments)
 
bool check_post_op_type (NodeType *post_op_type, int len)
 
void fuse_convolution_with_post_op (Graph &g, INode *fused_node, std::list< INode *> post_op_node_list, int prev_op_dst_pos)
 
std::list< INode * > get_post_op_list (Graph &g, int &eltwise_operand_id, int &prev_op_dst_pos, int conv_node_id, const std::set< Activation > &supported_fused_activations)
 
template<typename N >
void fuse_convolution (Graph &g, const Edge *output_edge, int conv_node_id, const std::set< Activation > &supported_fused_activations)
 Fuse below operators: More...
 
template<typename N1 , typename F , typename... Args>
void fuse_layer (Graph &g, std::function< bool(INode &)> const &prec, const F fuse_fcn, Args &&... optional_arguments)
 

Variables

NodeType valide_post_op_type [4][3]
 

Function Documentation

◆ all_inputs_are_visited()

bool arm_compute::graph::detail::all_inputs_are_visited ( const INode * node,
const std::vector< bool > &  visited 
)
inline

Checks if all the input dependencies of a node have been visited.

Parameters
[in]  node     Node to check
[in]  visited  Vector that contains the visited information
Returns
True if all input dependencies have been visited, else false

Definition at line 46 of file TopologicalSort.cpp.

References ARM_COMPUTE_ERROR_ON, Graph::edge(), arm_compute::graph::EmptyNodeID, INode::graph(), INode::input_edges(), Edge::producer(), and Edge::producer_id().

Referenced by arm_compute::graph::bfs(), and arm_compute::graph::dfs().

47 {
48  ARM_COMPUTE_ERROR_ON(node == nullptr);
49  const Graph *graph = node->graph();
50  ARM_COMPUTE_ERROR_ON(graph == nullptr);
51 
52  bool are_all_visited = true;
53  for(const auto &input_edge_id : node->input_edges())
54  {
55  if(input_edge_id != EmptyNodeID)
56  {
57  const Edge *input_edge = graph->edge(input_edge_id);
58  ARM_COMPUTE_ERROR_ON(input_edge == nullptr);
59  ARM_COMPUTE_ERROR_ON(input_edge->producer() == nullptr);
60  if(!visited[input_edge->producer_id()])
61  {
62  are_all_visited = false;
63  break;
64  }
65  }
66  }
67 
68  return are_all_visited;
69 }
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
Definition: Error.h:466
constexpr NodeID EmptyNodeID
Constant NodeID specifying an equivalent of null node.
Definition: Types.h:76
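
A minimal sketch of the scheduling pattern this helper supports. Note that all_inputs_are_visited() lives in TopologicalSort.cpp and is consumed by arm_compute::graph::bfs() and dfs(); the loop below is an illustrative assumption modeled on those callers, not the library's implementation.

std::vector<bool>   visited(g.nodes().size(), false);
std::vector<NodeID> order;
bool progressed = true;
while(progressed)
{
    progressed = false;
    for(auto &node : g.nodes())
    {
        // Emit a node once every one of its producers has been emitted
        if(node != nullptr && !visited[node->id()] && detail::all_inputs_are_visited(node.get(), visited))
        {
            visited[node->id()] = true;
            order.push_back(node->id());
            progressed = true;
        }
    }
}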

◆ allocate_all_input_tensors()

void allocate_all_input_tensors ( INode & node )

Allocates all input tensors of a node.

Parameters
[in]  node  Node to allocate the input tensors of

Definition at line 73 of file ExecutionHelpers.cpp.

References ITensorHandle::allocate(), ARM_COMPUTE_ERROR_ON_MSG, Tensor::bound_edges(), Tensor::handle(), INode::input(), and INode::num_inputs().

Referenced by allocate_const_tensors().

74 {
75  for(unsigned int i = 0; i < node.num_inputs(); ++i)
76  {
77  Tensor *tensor = node.input(i);
78  if(tensor != nullptr && !tensor->bound_edges().empty())
79  {
80  ARM_COMPUTE_ERROR_ON_MSG(!tensor->handle(), "Tensor handle is not configured!");
81  tensor->handle()->allocate();
82  }
83  }
84 }
#define ARM_COMPUTE_ERROR_ON_MSG(cond, msg)
Definition: Error.h:456

◆ allocate_all_output_tensors()

void allocate_all_output_tensors ( INode & node )

Allocates all output tensors of a node.

Parameters
[in]  node  Node to allocate the output tensors of

Definition at line 86 of file ExecutionHelpers.cpp.

References ITensorHandle::allocate(), ARM_COMPUTE_ERROR_ON_MSG, Tensor::bound_edges(), Tensor::handle(), INode::num_outputs(), and INode::output().

Referenced by allocate_const_tensors().

87 {
88  for(unsigned int i = 0; i < node.num_outputs(); ++i)
89  {
90  Tensor *tensor = node.output(i);
91  if(tensor != nullptr && !tensor->bound_edges().empty())
92  {
93  ARM_COMPUTE_ERROR_ON_MSG(!tensor->handle(), "Tensor handle is not configured!");
94  tensor->handle()->allocate();
95  }
96  }
97 }
#define ARM_COMPUTE_ERROR_ON_MSG(cond, msg)
Definition: Error.h:456

◆ allocate_all_tensors()

void allocate_all_tensors ( Graph & g )

Allocates all tensors of a graph.

Parameters
[in]  g  Graph to allocate the tensors of

Definition at line 120 of file ExecutionHelpers.cpp.

References Graph::tensors().

Referenced by GraphManager::finalize_graph().

121 {
122  auto &tensors = g.tensors();
123 
124  for(auto &tensor : tensors)
125  {
126  if(tensor && !tensor->bound_edges().empty() && tensor->handle() != nullptr && tensor->handle()->tensor().info()->is_resizable() && tensor->handle()->tensor().is_used())
127  {
128  tensor->handle()->allocate();
129  }
130  }
131 }

◆ allocate_const_tensors()

void allocate_const_tensors ( Graph & g )

Allocates const tensors of a given graph.

Parameters
[in]  g  Graph to allocate the const tensors of

Definition at line 99 of file ExecutionHelpers.cpp.

References allocate_all_input_tensors(), allocate_all_output_tensors(), arm_compute::graph::Const, arm_compute::graph::Input, Graph::nodes(), and arm_compute::graph::Output.

Referenced by GraphManager::finalize_graph().

100 {
101  for(auto &node : g.nodes())
102  {
103  if(node != nullptr)
104  {
105  switch(node->type())
106  {
107  case NodeType::Const:
108  case NodeType::Input:
109  allocate_all_output_tensors(*node);
110  break;
111  case NodeType::Output:
112  allocate_all_input_tensors(*node);
113  default:
114  break;
115  }
116  }
117  }
118 }
void allocate_all_input_tensors(INode &node)
Allocates all input tensors of a node.
void allocate_all_output_tensors(INode &node)
Allocates all output tensors of a node.

◆ call_all_const_node_accessors()

void call_all_const_node_accessors ( Graph & g )

Call all const node accessors.

Parameters
[in]  g  Graph containing the const nodes

Definition at line 193 of file ExecutionHelpers.cpp.

References call_tensor_accessor(), arm_compute::graph::Const, and Graph::nodes().

Referenced by GraphManager::finalize_graph().

194 {
195  auto &nodes = g.nodes();
196 
197  for(auto &node : nodes)
198  {
199  if(node != nullptr && node->type() == NodeType::Const && node->num_outputs())
200  {
201  if(!node->output(0)->bound_edges().empty())
202  {
203  call_tensor_accessor(node->output(0));
204  }
205  }
206  }
207 }
void call_tensor_accessor(Tensor *tensor)
Calls accessor of a given tensor.

◆ call_all_input_node_accessors()

bool call_all_input_node_accessors ( ExecutionWorkload & workload )

Call all input node accessors.

Parameters
[in]  workload  Workload to execute
Returns
True if all the accesses were valid

Definition at line 209 of file ExecutionHelpers.cpp.

References arm_compute::mlgo::parser::end(), arm_compute::utility::for_each(), and ExecutionWorkload::inputs.

Referenced by GraphManager::execute_graph().

210 {
211  bool is_valid = true;
212  std::for_each(std::begin(workload.inputs), std::end(workload.inputs), [&](Tensor * input_tensor)
213  {
214  bool valid_input = (input_tensor != nullptr) && input_tensor->call_accessor();
215  is_valid = is_valid && valid_input;
216  });
217  return is_valid;
218 }
void end(TokenStream &in, bool &valid)
Definition: MLGOParser.cpp:290
void for_each(F &&)
Base case of for_each.
Definition: Utility.h:110

◆ call_all_output_node_accessors()

bool call_all_output_node_accessors ( ExecutionWorkload & workload )

Call all output node accessors.

Parameters
[in]  workload  Workload to execute
Returns
True if all the accessors expect more data

Definition at line 259 of file ExecutionHelpers.cpp.

References arm_compute::mlgo::parser::end(), arm_compute::utility::for_each(), ExecutionWorkload::outputs, and arm_compute::graph::sync_backends().

Referenced by GraphManager::execute_graph().

260 {
261  bool is_valid = true;
262  std::for_each(std::begin(workload.outputs), std::end(workload.outputs), [&](Tensor * output_tensor)
263  {
264  bool valid_output = (output_tensor != nullptr) && output_tensor->call_accessor();
265  is_valid = is_valid && valid_output;
266  });
267 
268  sync_backends();
269 
270  return is_valid;
271 }
void end(TokenStream &in, bool &valid)
Definition: MLGOParser.cpp:290
void for_each(F &&)
Base case of for_each.
Definition: Utility.h:110
void sync_backends()
Synchronize kernels execution on the backends.
Definition: Utils.cpp:119

◆ call_all_tasks()

void call_all_tasks ( ExecutionWorkload & workload )

Executes all tasks of a workload.

Parameters
[in]  workload  Workload to execute

Definition at line 230 of file ExecutionHelpers.cpp.

References ARM_COMPUTE_ERROR_ON, ExecutionWorkload::ctx, GraphContext::memory_managers(), and ExecutionWorkload::tasks.

Referenced by GraphManager::execute_graph().

231 {
232  ARM_COMPUTE_ERROR_ON(workload.ctx == nullptr);
233 
234  // Acquire memory for the transition buffers
235  for(auto &mm_ctx : workload.ctx->memory_managers())
236  {
237  if(mm_ctx.second.cross_group != nullptr)
238  {
239  mm_ctx.second.cross_group->acquire();
240  }
241  }
242 
243  // Execute tasks
244  for(auto &task : workload.tasks)
245  {
246  task();
247  }
248 
249  // Release memory for the transition buffers
250  for(auto &mm_ctx : workload.ctx->memory_managers())
251  {
252  if(mm_ctx.second.cross_group != nullptr)
253  {
254  mm_ctx.second.cross_group->release();
255  }
256  }
257 }
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
Definition: Error.h:466
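
Together with the two accessor helpers above, this forms one inference step. A minimal sketch, assuming it mirrors GraphManager::execute_graph(), which the "Referenced by" notes name as the caller of all three helpers:

bool run_once(ExecutionWorkload &workload)
{
    // Feed the inputs; abort the step if any input accessor fails
    if(!detail::call_all_input_node_accessors(workload))
    {
        return false;
    }
    // Acquire the transition buffers, run every task, release the buffers
    detail::call_all_tasks(workload);
    // Drain the outputs and synchronize the backends
    return detail::call_all_output_node_accessors(workload);
}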

◆ call_tensor_accessor()

void call_tensor_accessor ( Tensor * tensor )

Calls accessor of a given tensor.

Parameters
[in]  tensor  The tensor of which the accessor should be called

Definition at line 187 of file ExecutionHelpers.cpp.

References ARM_COMPUTE_ERROR_ON, and Tensor::call_accessor().

Referenced by call_all_const_node_accessors().

188 {
189  ARM_COMPUTE_ERROR_ON(!tensor);
190  tensor->call_accessor();
191 }
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
Definition: Error.h:466

◆ check_post_op_type()

bool arm_compute::graph::detail::check_post_op_type ( NodeType * post_op_type,
int  len 
)

Definition at line 352 of file NodeFusionMutator.cpp.

References MAX_POST_OP_NUM, and MAX_VALIDE_COMBINATION.

Referenced by get_post_op_list().

353 {
354  if(len > MAX_POST_OP_NUM || len <= 0)
355  {
356  return false;
357  }
358 
359  bool found = false;
360  for(int i = 0; i < MAX_VALIDE_COMBINATION; ++i)
361  {
362  for(int j = 0; j < len; ++j)
363  {
364  if(post_op_type[j] != valide_post_op_type[i][j])
365  {
366  found = false;
367  break;
368  }
369  found = true;
370  }
371  if(found)
372  break;
373  }
374 
375  return found;
376 }
#define MAX_POST_OP_NUM
#define MAX_VALIDE_COMBINATION
Check valid combinations:
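
A usage sketch (this helper is internal to NodeFusionMutator.cpp, so the calls below are illustrative only). Per the listing above, the first len entries of post_op_type must match the first len entries of one of the rows of valide_post_op_type, documented under Variable Documentation below.

// add + act is one of the valid combinations
NodeType ok_ops[2]  = { EltwiseLayerNode::node_type, ActivationLayerNode::node_type };
bool accepted = detail::check_post_op_type(ok_ops, 2);  // true

// act + act matches no row and is rejected
NodeType bad_ops[2] = { ActivationLayerNode::node_type, ActivationLayerNode::node_type };
bool rejected = detail::check_post_op_type(bad_ops, 2); // false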

◆ configure_all_nodes()

ExecutionWorkload configure_all_nodes ( Graph & g,
GraphContext & ctx,
const std::vector< NodeID > &  node_order 
)

Configures all nodes of a graph.

Parameters
[in,out]  g           Graph to configure the nodes of
[in]      ctx         Graph context to use
[in]      node_order  The order in which to configure the nodes
Returns
The execution workload

Definition at line 133 of file ExecutionHelpers.cpp.

References INode::assigned_target(), IDeviceBackend::configure_node(), ExecutionWorkload::ctx, BackendRegistry::get(), BackendRegistry::get_backend(), ExecutionWorkload::graph, arm_compute::graph::Input, ExecutionWorkload::inputs, arm_compute::graph::is_utility_node(), Graph::node(), Graph::nodes(), arm_compute::graph::Output, ExecutionWorkload::outputs, and ExecutionWorkload::tasks.

Referenced by GraphManager::finalize_graph().

134 {
135  ExecutionWorkload workload;
136  workload.graph = &g;
137  workload.ctx = &ctx;
138 
139  // Reserve memory for tasks
140  workload.tasks.reserve(node_order.size());
141 
142  // Create tasks
143  for(auto &node_id : node_order)
144  {
145  auto node = g.node(node_id);
146  if(node != nullptr)
147  {
148  Target assigned_target = node->assigned_target();
149  backends::IDeviceBackend &backend = backends::BackendRegistry::get().get_backend(assigned_target);
150  std::unique_ptr<IFunction> func = backend.configure_node(*node, ctx);
151  if(func != nullptr || is_utility_node(node))
152  {
153  workload.tasks.emplace_back(ExecutionTask(std::move(func), node));
154  }
155  }
156  }
157 
158  // Add inputs and outputs
159  for(auto &node : g.nodes())
160  {
161  if(node != nullptr && node->type() == NodeType::Input)
162  {
163  workload.inputs.push_back(node->output(0));
164  }
165 
166  if(node != nullptr && node->type() == NodeType::Output)
167  {
168  workload.outputs.push_back(node->input(0));
169  continue;
170  }
171  }
172 
173  return workload;
174 }
bool is_utility_node(INode *node)
Definition: Utils.h:37
Target
Target enum
Definition: Acl.hpp:293

◆ configure_all_tensors()

void configure_all_tensors ( Graph & g )

Configures all tensors of a graph.

Parameters
[in]  g  Graph to configure

Definition at line 56 of file ExecutionHelpers.cpp.

References ARM_COMPUTE_ERROR_ON_MSG, IDeviceBackend::create_tensor(), BackendRegistry::get(), BackendRegistry::get_backend(), and Graph::tensors().

Referenced by GraphManager::finalize_graph().

57 {
58  auto &tensors = g.tensors();
59 
60  for(auto &tensor : tensors)
61  {
62  if(tensor && tensor->handle() == nullptr)
63  {
64  Target target = tensor->desc().target;
65  backends::IDeviceBackend &backend = backends::BackendRegistry::get().get_backend(target);
66  std::unique_ptr<ITensorHandle> handle = backend.create_tensor(*tensor);
67  ARM_COMPUTE_ERROR_ON_MSG(!handle, "Couldn't create backend handle!");
68  tensor->set_handle(std::move(handle));
69  }
70  }
71 }
#define ARM_COMPUTE_ERROR_ON_MSG(cond, msg)
Definition: Error.h:456
Target
Target enum
Definition: Acl.hpp:293

◆ configure_transition_manager()

void configure_transition_manager ( Graph & g,
GraphContext & ctx,
ExecutionWorkload & workload 
)

Configures transition manager and execution workload.

Parameters
[in]  g         Graph to configure
[in]  ctx       Graph context
[in]  workload  Workload to configure

Definition at line 236 of file CrossLayerMemoryManagerHelpers.cpp.

References MemoryManagerContext::cross_group, MemoryManagerContext::cross_mm, GraphContext::memory_management_ctx(), and ExecutionWorkload::tasks.

Referenced by GraphManager::finalize_graph().

237 {
238  // Get const tensors (un-managed)
239  std::set<ITensorHandle *> const_tensors = get_const_handles(g);
240 
241  std::vector<TaskHandles> tasks_handles;
242  TargetHandleCounter target_handle_count;
243 
244  // Count handles
245  for(auto &task : workload.tasks)
246  {
247  // Populates IO handles
248  tasks_handles.push_back(get_transition_handles(ctx, task, const_tensors));
249 
250  // Count handles
251  count_input_handles_per_target(tasks_handles.back(), target_handle_count);
252  }
253 
254  // Setup memory managers
255  for(auto &hc : target_handle_count)
256  {
257  MemoryManagerContext *mm_ctx = ctx.memory_management_ctx(hc.first);
258  if(mm_ctx != nullptr)
259  {
260  if(mm_ctx->cross_mm != nullptr && mm_ctx->cross_group != nullptr)
261  {
262  // Manage and allocate tensors
263  configure_handle_lifetime(tasks_handles, hc.second);
264  }
265  }
266  }
267 }
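
The helpers above compose into the graph finalization pipeline. A minimal sketch, assuming the call order implied by the "Referenced by GraphManager::finalize_graph()" notes on this page; the real GraphManager may interleave additional steps:

void finalize_sketch(Graph &g, GraphContext &ctx, const std::vector<NodeID> &node_order)
{
    detail::validate_all_nodes(g);            // each backend validates its nodes
    detail::configure_all_tensors(g);         // create backend tensor handles
    detail::allocate_const_tensors(g);        // allocate Const/Input/Output tensors
    detail::call_all_const_node_accessors(g); // populate const tensors (e.g. weights)

    ExecutionWorkload workload = detail::configure_all_nodes(g, ctx, node_order);
    detail::configure_transition_manager(g, ctx, workload); // cross-layer memory reuse
    detail::allocate_all_tensors(g);          // allocate the remaining tensors
    detail::prepare_all_tasks(workload);      // prepare tasks, release unused consts
}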

◆ fuse_convolution()

void arm_compute::graph::detail::fuse_convolution ( Graph & g,
const Edge * output_edge,
int  conv_node_id,
const std::set< Activation > &  supported_fused_activations 
)

Fuse below operators:

Main operator | Post operators
------------- | ----------------
conv          | add
conv          | act + add
conv          | add + act
conv          | act + add + act

Note: currently, only GEMM supports fusion with post operators.

Definition at line 538 of file NodeFusionMutator.cpp.

References Graph::add_connection(), Graph::add_node(), ARM_COMPUTE_ERROR_ON, ARM_COMPUTE_LOG_GRAPH_VERBOSE, arm_compute::test::validation::conv_info, arm_compute::test::validation::data_layout, TensorDescriptor::data_type, arm_compute::test::validation::data_type, Tensor::desc(), fuse_convolution_with_post_op(), arm_compute::graph::GEMM, get_post_op_list(), arm_compute::is_data_type_float(), TensorDescriptor::layout, arm_compute::NHWC, Graph::node(), arm_compute::test::validation::num_groups, Edge::producer(), Graph::remove_node(), INode::set_assigned_target(), TensorDescriptor::shape, Edge::tensor(), Dimensions< T >::y(), and Dimensions< T >::z().

539 {
540  ARM_COMPUTE_ERROR_ON(output_edge == nullptr);
541 
542  auto *conv_node = arm_compute::utils::cast::polymorphic_downcast<N *>(output_edge->producer());
543  ARM_COMPUTE_ERROR_ON(conv_node->output(0) == nullptr);
544 
545  const ConvolutionMethod conv_algorithm = conv_node->convolution_method();
546  if(conv_algorithm != ConvolutionMethod::GEMM)
547  {
548  ARM_COMPUTE_LOG_GRAPH_VERBOSE("Prevented fusion of convolution node with post ops due to non GEMM convolution\n");
549  return;
550  }
551 
552  // Prevent fusion if fused node has an output accessor
553  if(conv_node->output(0)->accessor() == nullptr)
554  {
 555  // If data type is FP32/FP16, data layout is NHWC, and filter size is 1x1, fuse convolution with post op, as Conv1x1 always leads to GEMM.
556  const Edge *input_edge = conv_node->input_edge(1);
557  if(input_edge != nullptr && input_edge->tensor() != nullptr)
558  {
559  const DataLayout data_layout = input_edge->tensor()->desc().layout;
560  const DataType data_type = input_edge->tensor()->desc().data_type;
561  const TensorShape tensor_shape = input_edge->tensor()->desc().shape;
562  if(data_layout != DataLayout::NHWC || is_data_type_float(data_type) == false || tensor_shape.y() != 1 || tensor_shape.z() != 1)
563  {
564  ARM_COMPUTE_LOG_GRAPH_VERBOSE("Prevented fusion of convolution node with post ops due to non GEMM convolution\n");
565  return;
566  }
567  }
568  else
569  {
570  return;
571  }
572 
573  // Get post op list
574  int eltwise_operand_id = 0;
 575  int prev_op_dst_pos = 0; // Previous operator dst's position in current operator
576  std::list<INode *> post_op_node_list = get_post_op_list(g, eltwise_operand_id, prev_op_dst_pos, conv_node_id, supported_fused_activations);
577 
578  if(post_op_node_list.size() == 0)
579  {
580  return;
581  }
 582  else // Do convolution fusion with post op if there is one (elementwise), two or more operators
583  {
584  const Target assigned_target = conv_node->assigned_target();
585 
586  // Extract conv inputs
587  const auto conv_input_id = conv_node->input_edge(0)->producer_id();
588  const auto conv_weights_id = conv_node->input_edge(1)->producer_id();
589  const auto conv_info = conv_node->convolution_info();
590  const auto conv_method = conv_node->convolution_method();
591  const auto num_groups = conv_node->num_groups();
592  FastMathHint fast_math_hint = conv_node->fast_math_hint();
593 
594  // Create the fused node
595  const NodeID fused_id = g.add_node<FusedConvolutionWithPostOpNode>(conv_info, num_groups, conv_method, fast_math_hint);
596  ARM_COMPUTE_LOG_GRAPH_VERBOSE("Fusing convolution node with ID : " << conv_node->id());
597 
598  // Add connections from the conv inputs to the fused node
599  g.add_connection(conv_input_id, 0, fused_id, 0);
600  g.add_connection(conv_weights_id, 0, fused_id, 1);
601  if(conv_node->input_edge(2) != nullptr)
602  {
603  auto conv_bias_id = conv_node->input_edge(2)->producer_id();
604  g.add_connection(conv_bias_id, 0, fused_id, 2);
605  }
606  g.add_connection(eltwise_operand_id, 0, fused_id, 3);
607  g.remove_node(conv_node->id());
608 
609  // Update fused node outputs
610  auto fused_node = g.node(fused_id);
611  fused_node->set_assigned_target(assigned_target);
612 
613  // Fuse convolution with post op
614  fuse_convolution_with_post_op(g, fused_node, post_op_node_list, prev_op_dst_pos);
615 
616  post_op_node_list.clear();
618  }
619  }
620  else
621  {
622  ARM_COMPUTE_LOG_GRAPH_VERBOSE("Prevented fusion of convolution node with post ops due to the presence of an output accessor\n");
623  }
624 }
DataType
Definition: Acl.hpp:485
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
Definition: Error.h:466
ConvolutionMethod
Available ConvolutionMethod.
Definition: Types.h:134
const DataType data_type
Definition: Im2Col.cpp:150
std::list< INode * > get_post_op_list(Graph &g, int &eltwise_operand_id, int &prev_op_dst_pos, int conv_node_id, const std::set< Activation > &supported_fused_activations)
const unsigned int num_groups
Definition: Im2Col.cpp:153
Target
Target enum
Definition: Acl.hpp:293
FastMathHint
Enable or disable fast math for Convolution layer.
Definition: Types.h:143
GEMM CL kernel type.
Definition: CLTypes.h:85
unsigned int NodeID
Definition: Types.h:69
void fuse_convolution_with_post_op(Graph &g, INode *fused_node, std::list< INode *> post_op_node_list, int prev_op_dst_pos)
#define ARM_COMPUTE_LOG_GRAPH_VERBOSE(x)
Definition: Logger.h:50
DataLayout
[DataLayout enum definition]
Definition: Types.h:113
bool is_data_type_float(DataType dt)
Check if a given data type is of floating point type.
Definition: Utils.h:961

◆ fuse_convolution_with_batch_normalization()

void arm_compute::graph::detail::fuse_convolution_with_batch_normalization ( Graph & g,
const Edge * output_edge 
)

Definition at line 81 of file NodeFusionMutator.cpp.

References Graph::add_connection(), Graph::add_node(), ARM_COMPUTE_ERROR_ON, ARM_COMPUTE_LOG_GRAPH_VERBOSE, Edge::consumer(), Edge::consumer_id(), arm_compute::test::validation::conv_info, arm_compute::quantization::epsilon, INode::input_edge(), INode::name(), NodeParams::name, Graph::node(), arm_compute::test::validation::num_groups, Edge::producer(), Edge::producer_id(), Graph::remove_node(), and transfer_driving_nodes_and_remove_old_node().

Referenced by NodeFusionMutator::mutate().

82 {
83  ARM_COMPUTE_ERROR_ON(output_edge == nullptr);
84 
85  auto *conv_node = arm_compute::utils::cast::polymorphic_downcast<ConvolutionLayerNode *>(output_edge->producer());
86  auto *bn_node = arm_compute::utils::cast::polymorphic_downcast<BatchNormalizationLayerNode *>(output_edge->consumer());
87 
88  // Not fusing if number of groups is greater than 1
89  if(conv_node->num_groups() > 1)
90  {
91  return;
92  }
93 
94  ARM_COMPUTE_LOG_GRAPH_VERBOSE("Fusing convolution node with ID : " << output_edge->producer_id()
95  << " with BatchNormalization Layer node with ID : " << output_edge->consumer_id() << std::endl);
96 
97  // Prevent fusion if fused node has an output accessor
98  if(conv_node->output(0)->accessor() == nullptr)
99  {
100  const Target assigned_target = conv_node->assigned_target();
101 
102  // Extract conv inputs
103  const auto conv_input_id = conv_node->input_edge(0)->producer_id();
104  const auto conv_weights_id = conv_node->input_edge(1)->producer_id();
105  const auto conv_info = conv_node->convolution_info();
106  const auto conv_method = conv_node->convolution_method();
107  const auto num_groups = conv_node->num_groups();
108  const auto act_info = bn_node->fused_activation();
109  FastMathHint fast_math_hint = conv_node->fast_math_hint();
110 
111  // Extract bn inputs
112  const auto bn_mean_id = bn_node->input_edge(1)->producer_id();
113  const auto bn_var_id = bn_node->input_edge(2)->producer_id();
114 
115  const auto epsilon = bn_node->epsilon();
116 
117  // Create the fused node
118  const NodeID fused_id = g.add_node<FusedConvolutionBatchNormalizationNode>(epsilon, conv_info, num_groups, conv_method, fast_math_hint, act_info);
119 
120  if(conv_node->input_edge(2) != nullptr)
121  {
122  auto conv_bias_id = conv_node->input_edge(2)->producer_id();
123  g.add_connection(conv_bias_id, 0, fused_id, 2);
124  }
125 
126  // Add connections from the conv/batch_norm inputs to the fused node
127  g.add_connection(conv_input_id, 0, fused_id, 0);
128  g.add_connection(conv_weights_id, 0, fused_id, 1);
129  g.add_connection(bn_mean_id, 0, fused_id, 3);
130  g.add_connection(bn_var_id, 0, fused_id, 4);
131 
132  if(bn_node->input_edge(3) != nullptr)
133  {
134  const auto bn_beta_id = bn_node->input_edge(3)->producer_id();
135  g.add_connection(bn_beta_id, 0, fused_id, 5);
136  }
137 
138  if(bn_node->input_edge(4) != nullptr)
139  {
140  const auto bn_gamma_id = bn_node->input_edge(4)->producer_id();
141  g.add_connection(bn_gamma_id, 0, fused_id, 6);
142  }
143 
144  auto fused_node = g.node(fused_id);
145  auto bn_node_name = bn_node->name();
146 
147  transfer_driving_nodes_and_remove_old_node(g, fused_node, bn_node, true);
148 
149  fused_node->set_assigned_target(assigned_target);
150  fused_node->set_common_node_parameters(NodeParams{ conv_node->name() + "+" + bn_node_name, assigned_target });
151 
152  // Remove convolution node
153  g.remove_node(conv_node->id());
154  }
155  else
156  {
157  ARM_COMPUTE_LOG_GRAPH_VERBOSE("Prevented fusion of convolution with batch normalization due to the presence of an output accessor\n");
158  }
159 }
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
Definition: Error.h:466
void transfer_driving_nodes_and_remove_old_node(Graph &g, INode *new_node, INode *old_node, bool add_output_tensor)
const unsigned int num_groups
Definition: Im2Col.cpp:153
Target
Target enum
Definition: Acl.hpp:293
FastMathHint
Enable or disable fast math for Convolution layer.
Definition: Types.h:143
unsigned int NodeID
Definition: Types.h:69
#define ARM_COMPUTE_LOG_GRAPH_VERBOSE(x)
Definition: Logger.h:50

◆ fuse_convolution_with_post_op()

void arm_compute::graph::detail::fuse_convolution_with_post_op ( Graph & g,
INode * fused_node,
std::list< INode *>  post_op_node_list,
int  prev_op_dst_pos 
)

Definition at line 378 of file NodeFusionMutator.cpp.

References ARM_COMPUTE_ERROR_ON, ARM_COMPUTE_LOG_GRAPH_VERBOSE, ActivationLayerNode::node_type, EltwiseLayerNode::node_type, INode::post_op_info_list(), Graph::remove_node(), and transfer_driving_nodes_and_remove_old_node().

Referenced by fuse_convolution().

379 {
380  unsigned int op_idx = 0;
381  // Fuse post operators with conv
382  for(const auto &post_op : post_op_node_list)
383  {
384  switch(post_op->type())
385  {
386  case EltwiseLayerNode::node_type:
387  {
388  auto *eltwise_node = arm_compute::utils::cast::polymorphic_downcast<EltwiseLayerNode *>(post_op);
389  ARM_COMPUTE_ERROR_ON(eltwise_node->output(0) == nullptr);
390 
391  fused_node->post_op_info_list().push_back(std::make_unique<ConvPostOpInfoEltwiseAdd>(prev_op_dst_pos, eltwise_node->convert_policy()));
392  ARM_COMPUTE_LOG_GRAPH_VERBOSE(" with Elementwise Layer node with ID : " << post_op->id());
393  break;
394  }
395  case ActivationLayerNode::node_type:
396  {
397  auto *act_node = arm_compute::utils::cast::polymorphic_downcast<ActivationLayerNode *>(post_op);
398  ARM_COMPUTE_ERROR_ON(act_node->output(0) == nullptr);
399 
400  fused_node->post_op_info_list().push_back(std::make_unique<ConvPostOpInfoActivation>(act_node->activation_info()));
401  ARM_COMPUTE_LOG_GRAPH_VERBOSE(" with Activation Layer node with ID : " << post_op->id());
402  break;
403  }
404  default:
405  {
406  break;
407  }
408  }
409 
410  if(op_idx == post_op_node_list.size() - 1) // last fusable node
411  {
412  transfer_driving_nodes_and_remove_old_node(g, fused_node, post_op, true);
413  }
414  else
415  {
416  // Remove node
417  g.remove_node(post_op->id());
418  }
419  op_idx++;
420  }
421 }
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
Definition: Error.h:466
void transfer_driving_nodes_and_remove_old_node(Graph &g, INode *new_node, INode *old_node, bool add_output_tensor)
#define ARM_COMPUTE_LOG_GRAPH_VERBOSE(x)
Definition: Logger.h:50

◆ fuse_depthwise_convolution_with_batch_normalization()

void arm_compute::graph::detail::fuse_depthwise_convolution_with_batch_normalization ( Graph & g,
const Edge * output_edge 
)

Definition at line 161 of file NodeFusionMutator.cpp.

References Graph::add_connection(), Graph::add_node(), ARM_COMPUTE_ERROR_ON, ARM_COMPUTE_LOG_GRAPH_VERBOSE, Edge::consumer(), Edge::consumer_id(), arm_compute::test::validation::conv_info, arm_compute::quantization::epsilon, INode::input_edge(), INode::name(), NodeParams::name, Graph::node(), Edge::producer(), Edge::producer_id(), Graph::remove_node(), and transfer_driving_nodes_and_remove_old_node().

Referenced by NodeFusionMutator::mutate().

162 {
163  ARM_COMPUTE_ERROR_ON(output_edge == nullptr);
164 
165  auto *depth_conv_node = arm_compute::utils::cast::polymorphic_downcast<DepthwiseConvolutionLayerNode *>(output_edge->producer());
166  auto *bn_node = arm_compute::utils::cast::polymorphic_downcast<BatchNormalizationLayerNode *>(output_edge->consumer());
167 
168  ARM_COMPUTE_LOG_GRAPH_VERBOSE("Fusing depthwise convolution node with ID : " << output_edge->producer_id()
169  << " with BatchNormalization Layer node with ID : " << output_edge->consumer_id() << std::endl);
170 
171  // Prevent fusion if fused node has an output accessor
172  if(depth_conv_node->output(0)->accessor() == nullptr)
173  {
174  const Target assigned_target = depth_conv_node->assigned_target();
175 
176  // Extract conv inputs
177  const auto depth_conv_input_id = depth_conv_node->input_edge(0)->producer_id();
178  const auto conv_weights_id = depth_conv_node->input_edge(1)->producer_id();
179  const auto conv_info = depth_conv_node->convolution_info();
180  const auto depth_conv_method = depth_conv_node->depthwise_convolution_method();
181  const auto depth_multiplier = depth_conv_node->depth_multiplier();
182  const auto act_info = bn_node->fused_activation();
183 
184  // Extract bn inputs
185  const auto bn_mean_id = bn_node->input_edge(1)->producer_id();
186  const auto bn_var_id = bn_node->input_edge(2)->producer_id();
187  const auto bn_beta_id = bn_node->input_edge(3)->producer_id();
188  const auto bn_gamma_id = bn_node->input_edge(4)->producer_id();
189  const auto epsilon = bn_node->epsilon();
190 
191  // Create the fused node
192  const NodeID fused_id = g.add_node<FusedDepthwiseConvolutionBatchNormalizationNode>(epsilon, conv_info, depth_multiplier, depth_conv_method, act_info);
193 
194  if(depth_conv_node->input_edge(2) != nullptr)
195  {
196  const auto conv_bias_id = depth_conv_node->input_edge(2)->producer_id();
197  g.add_connection(conv_bias_id, 0, fused_id, 2);
198  }
199 
200  // Add connections from the conv/batch_norm inputs to the fused node
201  g.add_connection(depth_conv_input_id, 0, fused_id, 0);
202  g.add_connection(conv_weights_id, 0, fused_id, 1);
203  g.add_connection(bn_mean_id, 0, fused_id, 3);
204  g.add_connection(bn_var_id, 0, fused_id, 4);
205  g.add_connection(bn_beta_id, 0, fused_id, 5);
206  g.add_connection(bn_gamma_id, 0, fused_id, 6);
207 
208  auto fused_node = g.node(fused_id);
209  auto bn_node_name = bn_node->name();
210 
211  transfer_driving_nodes_and_remove_old_node(g, fused_node, bn_node, true);
212 
213  fused_node->set_assigned_target(assigned_target);
214  fused_node->set_common_node_parameters(NodeParams{ depth_conv_node->name() + "+" + bn_node_name, assigned_target });
215 
216  // Remove convolution node
217  g.remove_node(depth_conv_node->id());
218  }
219  else
220  {
221  ARM_COMPUTE_LOG_GRAPH_VERBOSE("Prevented fusion of depthwise convolution with batch normalization due to the presence of an output accessor\n");
222  }
223 }
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
Definition: Error.h:466
void transfer_driving_nodes_and_remove_old_node(Graph &g, INode *new_node, INode *old_node, bool add_output_tensor)
Target
Target enum
Definition: Acl.hpp:293
unsigned int NodeID
Definition: Types.h:69
#define ARM_COMPUTE_LOG_GRAPH_VERBOSE(x)
Definition: Logger.h:50

◆ fuse_layer() [1/2]

void arm_compute::graph::detail::fuse_layer ( Graph & g,
std::function< bool(INode &)> const &  prec,
const F  fuse_fcn,
Args &&...  optional_arguments 
)

Definition at line 311 of file NodeFusionMutator.cpp.

References Graph::edge(), Graph::node(), Graph::nodes(), and INode::output_edges().

312 {
313  // Note that fused nodes may be added to the end of the node list.
314  // Instead of only looping over the original list of nodes, we loop over the current node list which could be growing.
315  // This is intentional as it probes the newly added fused nodes for further fusing opportunities.
316  for(unsigned int i = 0; i < g.nodes().size(); ++i)
317  {
318  auto node = g.node(i);
319  // Check if the node is of type N1 and not a branching node
320  if(node && node->type() == N1::node_type && node->output_edges().size() == 1)
321  {
322  const auto output_edge_id = *node->output_edges().begin();
323  const auto output_edge = g.edge(output_edge_id);
324 
325  // Check if following node is a type N2 node
326  if((output_edge != nullptr) && (output_edge->consumer() != nullptr) && (output_edge->consumer()->type() == N2::node_type) && prec(*output_edge->producer()))
327  {
328  fuse_fcn(g, output_edge, optional_arguments...);
329  }
330  }
331  }
332 }
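
A sketch of how a mutator might instantiate this template. This is an assumption modeled on NodeFusionMutator::mutate(), which this page lists as the caller of the fusion helpers; the activation whitelist is illustrative:

// Accept every producer node (no extra precondition)
auto empty_prec = [](INode &) { return true; };

// Fuse Convolution + BatchNormalization pairs
detail::fuse_layer<ConvolutionLayerNode, BatchNormalizationLayerNode>(
    g, empty_prec, detail::fuse_convolution_with_batch_normalization);

// Fuse Convolution + Activation pairs, restricted to a whitelist of activations
const std::set<Activation> supported_fused_activations = { Activation::RELU, Activation::BOUNDED_RELU, Activation::LU_BOUNDED_RELU };
detail::fuse_layer<ConvolutionLayerNode, ActivationLayerNode>(
    g, empty_prec, detail::fuse_node_with_activation<ConvolutionLayerNode>, supported_fused_activations);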

◆ fuse_layer() [2/2]

void arm_compute::graph::detail::fuse_layer ( Graph & g,
std::function< bool(INode &)> const &  prec,
const F  fuse_fcn,
Args &&...  optional_arguments 
)

Definition at line 627 of file NodeFusionMutator.cpp.

References Graph::edge(), Graph::node(), Graph::nodes(), and INode::output_edges().

628 {
629  // Note that fused nodes may be added to the end of the node list.
630  // Instead of only looping over the original list of nodes, we loop over the current node list which could be growing.
631  // This is intentional as it probes the newly added fused nodes for further fusing opportunities.
632  for(unsigned int i = 0; i < g.nodes().size(); ++i)
633  {
634  auto node = g.node(i);
635  // Check if the node is of type N1 and not a branching node
636  if(node && node->type() == N1::node_type && node->output_edges().size() == 1)
637  {
638  const auto output_edge_id = *node->output_edges().begin();
639  const auto output_edge = g.edge(output_edge_id);
640 
641  // Check if it's the correct target
642  if((output_edge != nullptr) && (output_edge->consumer() != nullptr) && prec(*output_edge->producer()))
643  {
644  fuse_fcn(g, output_edge, i, optional_arguments...);
645  }
646  }
647  }
648 }

◆ fuse_node_with_activation()

void arm_compute::graph::detail::fuse_node_with_activation ( Graph & g,
const Edge * output_edge,
const std::set< Activation > &  supported_fused_activations 
)

Definition at line 226 of file NodeFusionMutator.cpp.

References ARM_COMPUTE_ERROR_ON, ARM_COMPUTE_LOG_GRAPH_VERBOSE, Edge::consumer(), Edge::consumer_id(), arm_compute::graph::EltwiseLayer, arm_compute::is_data_type_float(), Edge::producer(), Edge::producer_id(), and transfer_driving_nodes_and_remove_old_node().

227 {
228  ARM_COMPUTE_ERROR_ON(output_edge == nullptr);
229 
230  auto *n_node = arm_compute::utils::cast::polymorphic_downcast<N *>(output_edge->producer());
231  auto *act_node = arm_compute::utils::cast::polymorphic_downcast<ActivationLayerNode *>(output_edge->consumer());
232 
233  ARM_COMPUTE_ERROR_ON(act_node->output(0) == nullptr || n_node->output(0) == nullptr);
234 
235  // Check if activation is supported for fusion
236  if(supported_fused_activations.count(act_node->activation_info().activation()) == 0)
237  {
238  return;
239  }
240 
 241  // EltwiseLayerNode can only be fused when data type is float
242  if(n_node->type() == NodeType::EltwiseLayer && !is_data_type_float(n_node->output(0)->desc().data_type))
243  {
244  return;
245  }
246 
247  ARM_COMPUTE_LOG_GRAPH_VERBOSE("Fusing node with ID : " << output_edge->producer_id()
248  << " with Activation Layer node with ID : " << output_edge->consumer_id() << std::endl);
249 
250  // Prevent fusion if fused node has an output accessor
251  if(n_node->output(0)->accessor() == nullptr)
252  {
253  // Set activation info to fused node
254  n_node->set_fused_activation(act_node->activation_info());
255 
256  transfer_driving_nodes_and_remove_old_node(g, n_node, act_node, false);
257  }
258  else
259  {
260  ARM_COMPUTE_LOG_GRAPH_VERBOSE("Prevented fusion of node with activation due to the presence of an output accessor\n");
261  }
262 }
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
Definition: Error.h:466
void transfer_driving_nodes_and_remove_old_node(Graph &g, INode *new_node, INode *old_node, bool add_output_tensor)
#define ARM_COMPUTE_LOG_GRAPH_VERBOSE(x)
Definition: Logger.h:50
bool is_data_type_float(DataType dt)
Check if a given data type is of floating point type.
Definition: Utils.h:961

◆ fuse_pad_with_convolution()

void arm_compute::graph::detail::fuse_pad_with_convolution ( Graph & g,
const Edge * output_edge 
)

Definition at line 265 of file NodeFusionMutator.cpp.

References Graph::add_connection(), Edge::consumer(), arm_compute::test::validation::conv_info, arm_compute::graph::get_dimension_idx(), arm_compute::graph::get_driver_nodes(), arm_compute::HEIGHT, arm_compute::graph::is_padding_in_height_or_width(), PadStrideInfo::pad_bottom(), PadStrideInfo::pad_left(), PadStrideInfo::pad_right(), PadStrideInfo::pad_top(), Edge::producer(), Graph::remove_node(), PadStrideInfo::round(), PadStrideInfo::stride(), and arm_compute::WIDTH.

266 {
267  auto *pad_node = arm_compute::utils::cast::polymorphic_downcast<PadLayerNode *>(output_edge->producer());
268  auto *conv_node = arm_compute::utils::cast::polymorphic_downcast<N *>(output_edge->consumer());
269 
270  const Edge *input_edge = pad_node->input_edge(0);
271  if(input_edge != nullptr && input_edge->tensor() != nullptr && pad_node->output(0)->accessor() == nullptr
272  && pad_node->pad_value().get<float>() == 0.0)
273  {
274  const DataLayout layout = input_edge->tensor()->desc().layout;
275  const PaddingList padding_list = pad_node->padding();
276 
277  const unsigned int height_index = get_dimension_idx(layout, DataLayoutDimension::HEIGHT);
278  const unsigned int width_index = get_dimension_idx(layout, DataLayoutDimension::WIDTH);
279 
280  const PaddingInfo pad_w = width_index < padding_list.size() ? padding_list[width_index] : PaddingInfo(0, 0);
281  const PaddingInfo pad_h = height_index < padding_list.size() ? padding_list[height_index] : PaddingInfo(0, 0);
282 
283  if(is_padding_in_height_or_width(layout, padding_list))
284  {
285  // Add paddings to the convolution node
286  const PadStrideInfo conv_info = conv_node->convolution_info();
287  const PadStrideInfo new_conv_info(
288  conv_info.stride().first,
289  conv_info.stride().second,
290  conv_info.pad_left() + pad_w.first,
291  conv_info.pad_right() + pad_w.second,
292  conv_info.pad_top() + pad_h.first,
293  conv_info.pad_bottom() + pad_h.second,
294  conv_info.round());
295  conv_node->set_convolution_info(new_conv_info);
296 
297  // Update drivers of the convolution node
298  std::vector<NodeIdxPair> pad_driver_nodes = get_driver_nodes(*pad_node);
299  g.remove_node(pad_node->id());
300 
301  // Update fused node inputs
302  for(auto &driver_node : pad_driver_nodes)
303  {
304  g.add_connection(driver_node.node_id, driver_node.index, conv_node->id(), 0);
305  }
306  }
307  }
308 }
bool is_padding_in_height_or_width(const DataLayout &layout, const PaddingList &padding_list)
Check if padding is in height and/or width dimensions.
std::vector< PaddingInfo > PaddingList
List of padding information.
Definition: Types.h:440
std::vector< NodeIdxPair > get_driver_nodes(const INode &node)
Get the list of driver nodes of a given node.
Definition: Utils.cpp:197
std::pair< uint32_t, uint32_t > PaddingInfo
Padding information as a pair of unsigned int start/end.
Definition: Types.h:437
DataLayout
[DataLayout enum definition]
Definition: Types.h:113
size_t get_dimension_idx(DataLayout data_layout, const DataLayoutDimension data_layout_dimension)
Get index of a tensor's given dimension depending on its layout.
Definition: Utils.cpp:148
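
A worked example with hypothetical values, showing the padding arithmetic from the listing above: the Pad layer's width/height padding is added onto the convolution's own padding, after which the Pad node can be removed.

// Convolution with stride 1 and no padding of its own
PadStrideInfo conv_info(1, 1, 0, 0);
PaddingInfo   pad_w{ 1, 1 }; // (before, after) padding on width
PaddingInfo   pad_h{ 2, 2 }; // (before, after) padding on height

const PadStrideInfo new_conv_info(
    conv_info.stride().first, conv_info.stride().second,
    conv_info.pad_left() + pad_w.first, conv_info.pad_right() + pad_w.second,
    conv_info.pad_top() + pad_h.first, conv_info.pad_bottom() + pad_h.second,
    conv_info.round());
// new_conv_info now carries pad left/right = 1/1 and top/bottom = 2/2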

◆ get_post_op_list()

std::list<INode *> arm_compute::graph::detail::get_post_op_list ( Graph & g,
int &  eltwise_operand_id,
int &  prev_op_dst_pos,
int  conv_node_id,
const std::set< Activation > &  supported_fused_activations 
)

Definition at line 423 of file NodeFusionMutator.cpp.

References ARM_COMPUTE_ERROR_ON, ARM_COMPUTE_LOG_GRAPH_VERBOSE, check_post_op_type(), arm_compute::graph::Dummy, Graph::edge(), Graph::node(), ActivationLayerNode::node_type, EltwiseLayerNode::node_type, Graph::nodes(), and INode::output_edges().

Referenced by fuse_convolution().

424 {
425  std::list<INode *> post_op_node_list = {};
426  NodeID prev_op_dst_id = conv_node_id;
427  NodeType post_op_type_list[3] = { NodeType::Dummy, NodeType::Dummy, NodeType::Dummy };
428  int post_op_idx = 0;
429  for(unsigned int i = conv_node_id + 1; i < g.nodes().size(); ++i)
430  {
431  auto post_op_node = g.node(i);
432  bool fusable_post_op = false;
433  if(post_op_node != nullptr && post_op_node->output_edges().size() > 0)
434  {
435  const auto post_op_output_edge_id = *post_op_node->output_edges().begin();
436  const auto post_op_output_edge = g.edge(post_op_output_edge_id);
437 
438  if(post_op_output_edge != nullptr)
439  {
440  switch(post_op_output_edge->producer()->type())
441  {
442  case EltwiseLayerNode::node_type:
443  {
444  auto *eltwise_node = arm_compute::utils::cast::polymorphic_downcast<EltwiseLayerNode *>(post_op_output_edge->producer());
445  ARM_COMPUTE_ERROR_ON(eltwise_node->output(0) == nullptr);
446  if(eltwise_node->output(0)->accessor() == nullptr)
447  {
448  post_op_node_list.push_back(post_op_output_edge->producer());
449  fusable_post_op = true;
450  post_op_type_list[post_op_idx++] = eltwise_node->type();
451 
452  // Extract elementwise inputs
453  const auto eltwise_input_id_0 = eltwise_node->input_edge(0)->producer_id();
454  const auto eltwise_input_id_1 = eltwise_node->input_edge(1)->producer_id();
455  if(eltwise_input_id_0 == prev_op_dst_id)
456  {
457  eltwise_operand_id = eltwise_input_id_1;
458  prev_op_dst_pos = 0;
459  }
460  else if(eltwise_input_id_1 == prev_op_dst_id)
461  {
462  eltwise_operand_id = eltwise_input_id_0;
463  prev_op_dst_pos = 1;
464  }
465  }
466  else
467  {
468  ARM_COMPUTE_LOG_GRAPH_VERBOSE("Prevented fusion of convolution node with elementwise due to the presence of an output accessor\n");
469  }
470  break;
471  }
472  case ActivationLayerNode::node_type:
473  {
474  auto *act_node = arm_compute::utils::cast::polymorphic_downcast<ActivationLayerNode *>(post_op_output_edge->producer());
475  ARM_COMPUTE_ERROR_ON(act_node->output(0) == nullptr);
476  // Check if activation is supported for fusion
477  if(supported_fused_activations.count(act_node->activation_info().activation()) == 0)
478  {
479  break;
480  }
481  if(act_node->output(0)->accessor() == nullptr)
482  {
483  post_op_node_list.push_back(post_op_output_edge->producer());
484  fusable_post_op = true;
485  post_op_type_list[post_op_idx++] = act_node->type();
486  prev_op_dst_id = act_node->id();
487  }
488  else
489  {
490  ARM_COMPUTE_LOG_GRAPH_VERBOSE("Prevented fusion of convolution node with activation due to the presence of an output accessor\n");
491  }
492  break;
493  }
494  default:
495  {
496  break;
497  }
498  }
499  }
500 
501  // Check if the node is not a branching node and current node is fusable
502  if(post_op_node->output_edges().size() == 1 && fusable_post_op == true && post_op_node_list.size() < 3)
503  {
504  continue;
505  }
506  else
507  {
508  break;
509  }
510  }
511  }
512 
 513  // Check whether it's a valid post op list
514  if(post_op_node_list.size() > 0)
515  {
516  bool fuse_with_post_op = check_post_op_type(post_op_type_list, post_op_node_list.size());
517  if(!fuse_with_post_op)
518  {
519  post_op_node_list.clear();
520  }
521  }
522 
523  return post_op_node_list;
524 }
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
Definition: Error.h:466
NodeType
Supported nodes.
Definition: Types.h:199
unsigned int NodeID
Definition: Types.h:69
#define ARM_COMPUTE_LOG_GRAPH_VERBOSE(x)
Definition: Logger.h:50
bool check_post_op_type(NodeType *post_op_type, int len)
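
A sketch of the out-parameters, using a hypothetical chain conv -> act -> add (g, conv_node_id and supported_fused_activations assumed in scope): the returned list is { act, add }, eltwise_operand_id receives the producer id of the add operand that does not come from the fused chain, and prev_op_dst_pos receives the add input slot (0 or 1) that consumed the chain's output. fuse_convolution() then wires that operand into input 3 of the fused node and records prev_op_dst_pos in the post op info.

int eltwise_operand_id = 0;
int prev_op_dst_pos    = 0;
std::list<INode *> post_ops = detail::get_post_op_list(
    g, eltwise_operand_id, prev_op_dst_pos, conv_node_id, supported_fused_activations);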

◆ prepare_all_tasks()

void prepare_all_tasks ( ExecutionWorkload & workload )

Prepares all tasks for execution.

Parameters
[in]  workload  Workload to prepare

Definition at line 220 of file ExecutionHelpers.cpp.

References ARM_COMPUTE_ERROR_ON, ExecutionWorkload::graph, release_unused_tensors(), and ExecutionWorkload::tasks.

Referenced by GraphManager::finalize_graph().

221 {
222  ARM_COMPUTE_ERROR_ON(workload.graph == nullptr);
223  for(auto &task : workload.tasks)
224  {
225  task.prepare();
226  release_unused_tensors(*workload.graph);
227  }
228 }
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
Definition: Error.h:466
void release_unused_tensors(Graph &g)
Release the memory of all unused const nodes.

◆ release_unused_tensors()

void release_unused_tensors ( Graph & g )

Release the memory of all unused const nodes.

Parameters
[in]  g  Graph to release the memory from

Definition at line 176 of file ExecutionHelpers.cpp.

References Graph::tensors().

Referenced by prepare_all_tasks().

177 {
178  for(auto &tensor : g.tensors())
179  {
180  if(tensor != nullptr && tensor->handle() != nullptr)
181  {
182  tensor->handle()->release_if_unused();
183  }
184  }
185 }

◆ transfer_driving_nodes_and_remove_old_node()

void arm_compute::graph::detail::transfer_driving_nodes_and_remove_old_node ( Graph & g,
INode * new_node,
INode * old_node,
bool  add_output_tensor 
)

Definition at line 47 of file NodeFusionMutator.cpp.

References Graph::add_connection(), arm_compute::graph::configure_tensor(), Tensor::extract_accessor(), arm_compute::graph::get_driving_nodes(), INode::id(), INode::output(), Graph::remove_node(), and Tensor::set_accessor().

Referenced by fuse_convolution_with_batch_normalization(), fuse_convolution_with_post_op(), fuse_depthwise_convolution_with_batch_normalization(), and fuse_node_with_activation().

48 {
49  if(new_node == nullptr || old_node == nullptr)
50  {
51  return;
52  }
53 
54  // Get driving nodes of last fusable node
55  std::vector<NodeIdxPair> last_driving_nodes = get_driving_nodes(*old_node);
56 
57  // Extract last fusable node accessor if any
58  if(old_node->output(0) == nullptr)
59  {
60  return;
61  }
62  auto old_node_accessor = old_node->output(0)->extract_accessor();
63 
64  // Remove node
65  g.remove_node(old_node->id());
66 
67  // Update fused node outputs
68  for(auto &driving_node : last_driving_nodes)
69  {
70  g.add_connection(new_node->id(), 0, driving_node.node_id, driving_node.index);
71  if(add_output_tensor)
72  {
73  configure_tensor(new_node->output(0));
74  }
75  }
76 
77  // Update accessor to fused node
78  new_node->output(0)->set_accessor(std::move(old_node_accessor));
79 }
void configure_tensor(Tensor *tensor)
Configures tensor.
Definition: Utils.cpp:217
std::vector< NodeIdxPair > get_driving_nodes(const INode &node)
Get the list of driving nodes of a given node.
Definition: Utils.cpp:177

◆ validate_all_nodes()

void validate_all_nodes ( Graph & g )

Validates all nodes.

Parameters
[in]  g  Graph to validate

Definition at line 39 of file ExecutionHelpers.cpp.

References ARM_COMPUTE_ERROR_ON_MSG, Status::error_description(), BackendRegistry::get(), BackendRegistry::get_backend(), Graph::nodes(), and IDeviceBackend::validate_node().

Referenced by GraphManager::finalize_graph().

40 {
41  auto &nodes = g.nodes();
42 
43  // Create tasks
44  for(auto &node : nodes)
45  {
46  if(node != nullptr)
47  {
48  Target assigned_target = node->assigned_target();
49  backends::IDeviceBackend &backend = backends::BackendRegistry::get().get_backend(assigned_target);
50  Status status = backend.validate_node(*node);
51  ARM_COMPUTE_ERROR_ON_MSG(!bool(status), status.error_description().c_str());
52  }
53  }
54 }
#define ARM_COMPUTE_ERROR_ON_MSG(cond, msg)
Definition: Error.h:456
Target
Target enum
Definition: Acl.hpp:293

Variable Documentation

◆ valide_post_op_type

NodeType valide_post_op_type[4][3]
Initial value:
= { { EltwiseLayerNode::node_type },
{ EltwiseLayerNode::node_type, ActivationLayerNode::node_type },
{ ActivationLayerNode::node_type, EltwiseLayerNode::node_type },
{ ActivationLayerNode::node_type, EltwiseLayerNode::node_type, ActivationLayerNode::node_type }
}

Definition at line 346 of file NodeFusionMutator.cpp.