Compute Library
 21.08
arm_compute::graph::detail Namespace Reference

Functions

void configure_transition_manager (Graph &g, GraphContext &ctx, ExecutionWorkload &workload)
 Configures transition manager and execution workload. More...
 
void validate_all_nodes (Graph &g)
 Validates all nodes. More...
 
void configure_all_tensors (Graph &g)
 Configures all tensors of a graph. More...
 
void allocate_all_input_tensors (INode &node)
 Allocates all input tensors of a node. More...
 
void allocate_all_output_tensors (INode &node)
 Allocates all output tensors of a node. More...
 
void allocate_const_tensors (Graph &g)
 Allocates the const tensors of a given graph. More...
 
void allocate_all_tensors (Graph &g)
 Allocates all tensors of a graph. More...
 
ExecutionWorkload configure_all_nodes (Graph &g, GraphContext &ctx, const std::vector< NodeID > &node_order)
 Configures all nodes of a graph. More...
 
void release_unused_tensors (Graph &g)
 Release the memory of all tensors that are no longer used. More...
 
void call_tensor_accessor (Tensor *tensor)
 Calls accessor of a given tensor. More...
 
void call_all_const_node_accessors (Graph &g)
 Call all const node accessors. More...
 
bool call_all_input_node_accessors (ExecutionWorkload &workload)
 Call all input node accessors. More...
 
bool call_all_output_node_accessors (ExecutionWorkload &workload)
 Call all output node accessors. More...
 
void prepare_all_tasks (ExecutionWorkload &workload)
 Prepares all tasks for execution. More...
 
void call_all_tasks (ExecutionWorkload &workload)
 Executes all tasks of a workload. More...
 
bool all_inputs_are_visited (const INode *node, const std::vector< bool > &visited)
 Checks if all the input dependencies of a node have been visited. More...
 
void fuse_convolution_with_batch_normalization (Graph &g, const Edge *output_edge)
 
void fuse_depthwise_convolution_with_batch_normalization (Graph &g, const Edge *output_edge)
 
template<typename N >
void fuse_node_with_activation (Graph &g, const Edge *output_edge, const std::set< Activation > &supported_fused_activations)
 
template<typename N1 , typename N2 , typename F , typename... Args>
void fuse_layer (Graph &g, std::function< bool(INode &)> const &prec, const F fuse_fcn, Args &&... optional_arguments)
 

Function Documentation

◆ all_inputs_are_visited()

bool arm_compute::graph::detail::all_inputs_are_visited ( const INode *  node,
const std::vector< bool > &  visited 
)
inline

Checks if all the input dependencies of a node have been visited.

Parameters
[in]  node     Node to check
[in]  visited  Vector that contains the visited information
Returns
True if all input dependencies have been visited, false otherwise

Definition at line 46 of file TopologicalSort.cpp.

References ARM_COMPUTE_ERROR_ON, Graph::edge(), arm_compute::graph::EmptyNodeID, INode::graph(), INode::input_edges(), Edge::producer(), and Edge::producer_id().

Referenced by arm_compute::graph::bfs(), and arm_compute::graph::dfs().

47 {
48  ARM_COMPUTE_ERROR_ON(node == nullptr);
49  const Graph *graph = node->graph();
50  ARM_COMPUTE_ERROR_ON(graph == nullptr);
51 
52  bool are_all_visited = true;
53  for(const auto &input_edge_id : node->input_edges())
54  {
55  if(input_edge_id != EmptyNodeID)
56  {
57  const Edge *input_edge = graph->edge(input_edge_id);
58  ARM_COMPUTE_ERROR_ON(input_edge == nullptr);
59  ARM_COMPUTE_ERROR_ON(input_edge->producer() == nullptr);
60  if(!visited[input_edge->producer_id()])
61  {
62  are_all_visited = false;
63  break;
64  }
65  }
66  }
67 
68  return are_all_visited;
69 }
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
Definition: Error.h:466
constexpr NodeID EmptyNodeID
Constant NodeID specifying an equivalent of null node.
Definition: Types.h:75
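
This helper is the admission test used when building a topological order: a node may be scheduled only once every producer feeding it has been scheduled. The loop below is a minimal illustrative sketch of that idea, not the library's bfs()/dfs() implementation; it assumes all_inputs_are_visited() is visible to the caller (it is an internal helper of TopologicalSort.cpp) and uses only the Graph/INode accessors shown on this page. The function name naive_topological_order() is hypothetical.

#include "arm_compute/graph/Graph.h"
#include "arm_compute/graph/INode.h"

#include <vector>

using namespace arm_compute::graph;

// Illustrative only: repeatedly schedule any node whose producers have all been visited.
std::vector<NodeID> naive_topological_order(Graph &g)
{
    std::vector<NodeID> order;
    std::vector<bool>   visited(g.nodes().size(), false);

    bool progress = true;
    while(progress)
    {
        progress = false;
        for(auto &node : g.nodes())
        {
            if(node != nullptr && !visited[node->id()] && detail::all_inputs_are_visited(node.get(), visited))
            {
                visited[node->id()] = true;
                order.push_back(node->id());
                progress = true;
            }
        }
    }
    return order;
}

The library's bfs()/dfs() achieve the same ordering in a single pass by keeping a queue/stack of nodes that have already passed this check.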

◆ allocate_all_input_tensors()

void allocate_all_input_tensors ( INode &  node )

Allocates all input tensors of a node.

Parameters
[in]  node  Node to allocate the input tensors of

Definition at line 73 of file ExecutionHelpers.cpp.

References ITensorHandle::allocate(), ARM_COMPUTE_ERROR_ON_MSG, Tensor::bound_edges(), Tensor::handle(), INode::input(), and INode::num_inputs().

Referenced by allocate_const_tensors().

74 {
75  for(unsigned int i = 0; i < node.num_inputs(); ++i)
76  {
77  Tensor *tensor = node.input(i);
78  if(tensor != nullptr && !tensor->bound_edges().empty())
79  {
80  ARM_COMPUTE_ERROR_ON_MSG(!tensor->handle(), "Tensor handle is not configured!");
81  tensor->handle()->allocate();
82  }
83  }
84 }
#define ARM_COMPUTE_ERROR_ON_MSG(cond, msg)
Definition: Error.h:456

◆ allocate_all_output_tensors()

void allocate_all_output_tensors ( INode &  node )

Allocates all output tensors of a node.

Parameters
[in]  node  Node to allocate the output tensors of

Definition at line 86 of file ExecutionHelpers.cpp.

References ITensorHandle::allocate(), ARM_COMPUTE_ERROR_ON_MSG, Tensor::bound_edges(), Tensor::handle(), INode::num_outputs(), and INode::output().

Referenced by allocate_const_tensors().

87 {
88  for(unsigned int i = 0; i < node.num_outputs(); ++i)
89  {
90  Tensor *tensor = node.output(i);
91  if(tensor != nullptr && !tensor->bound_edges().empty())
92  {
93  ARM_COMPUTE_ERROR_ON_MSG(!tensor->handle(), "Tensor handle is not configured!");
94  tensor->handle()->allocate();
95  }
96  }
97 }
#define ARM_COMPUTE_ERROR_ON_MSG(cond, msg)
Definition: Error.h:456

◆ allocate_all_tensors()

void allocate_all_tensors ( Graph &  g )

Allocates all tensors of a graph.

Parameters
[in]  g  Graph to allocate the tensors of

Definition at line 120 of file ExecutionHelpers.cpp.

References Graph::tensors().

Referenced by GraphManager::finalize_graph().

121 {
122  auto &tensors = g.tensors();
123 
124  for(auto &tensor : tensors)
125  {
126  if(tensor && !tensor->bound_edges().empty() && tensor->handle() != nullptr && tensor->handle()->tensor().info()->is_resizable() && tensor->handle()->tensor().is_used())
127  {
128  tensor->handle()->allocate();
129  }
130  }
131 }

◆ allocate_const_tensors()

void allocate_const_tensors ( Graph &  g )

Allocates the const tensors of a given graph.

Parameters
[in]  g  Graph to allocate the const tensors of

Definition at line 99 of file ExecutionHelpers.cpp.

References allocate_all_input_tensors(), allocate_all_output_tensors(), arm_compute::graph::Const, arm_compute::graph::Input, Graph::nodes(), and arm_compute::graph::Output.

Referenced by GraphManager::finalize_graph().

100 {
101  for(auto &node : g.nodes())
102  {
103  if(node != nullptr)
104  {
105  switch(node->type())
106  {
107  case NodeType::Const:
108  case NodeType::Input:
 109  allocate_all_output_tensors(*node);
 110  break;
 111  case NodeType::Output:
 112  allocate_all_input_tensors(*node);
 113  default:
114  break;
115  }
116  }
117  }
118 }
void allocate_all_input_tensors(INode &node)
Allocates all input tensors of a node.
void allocate_all_output_tensors(INode &node)
Allocates all output tensors of a node.

◆ call_all_const_node_accessors()

void call_all_const_node_accessors ( Graph &  g )

Call all const node accessors.

Parameters
[in]  g  Graph containing the const nodes

Definition at line 193 of file ExecutionHelpers.cpp.

References call_tensor_accessor(), arm_compute::graph::Const, and Graph::nodes().

Referenced by GraphManager::finalize_graph().

194 {
195  auto &nodes = g.nodes();
196 
197  for(auto &node : nodes)
198  {
199  if(node != nullptr && node->type() == NodeType::Const && node->num_outputs())
200  {
201  if(!node->output(0)->bound_edges().empty())
202  {
203  call_tensor_accessor(node->output(0));
204  }
205  }
206  }
207 }
void call_tensor_accessor(Tensor *tensor)
Calls accessor of a given tensor.

◆ call_all_input_node_accessors()

bool call_all_input_node_accessors ( ExecutionWorkload &  workload )

Call all input node accessors.

Parameters
[in]  workload  Workload to execute
Returns
True if all the accesses were valid

Definition at line 209 of file ExecutionHelpers.cpp.

References arm_compute::mlgo::parser::end(), arm_compute::utility::for_each(), and ExecutionWorkload::inputs.

Referenced by GraphManager::execute_graph().

210 {
211  bool is_valid = true;
212  std::for_each(std::begin(workload.inputs), std::end(workload.inputs), [&](Tensor * input_tensor)
213  {
214  bool valid_input = (input_tensor != nullptr) && input_tensor->call_accessor();
215  is_valid = is_valid && valid_input;
216  });
217  return is_valid;
218 }
void end(TokenStream &in, bool &valid)
Definition: MLGOParser.cpp:290
void for_each(F &&)
Base case of for_each.
Definition: Utility.h:110

◆ call_all_output_node_accessors()

bool call_all_output_node_accessors ( ExecutionWorkload &  workload )

Call all output node accessors.

Parameters
[in]  workload  Workload to execute
Returns
True if all the accessors expect more data

Definition at line 259 of file ExecutionHelpers.cpp.

References arm_compute::mlgo::parser::end(), arm_compute::utility::for_each(), and ExecutionWorkload::outputs.

Referenced by GraphManager::execute_graph().

260 {
261  bool is_valid = true;
262  std::for_each(std::begin(workload.outputs), std::end(workload.outputs), [&](Tensor * output_tensor)
263  {
264  bool valid_output = (output_tensor != nullptr) && output_tensor->call_accessor();
265  is_valid = is_valid && valid_output;
266  });
267 
268  return is_valid;
269 }
void end(TokenStream &in, bool &valid)
Definition: MLGOParser.cpp:290
void for_each(F &&)
Base case of for_each.
Definition: Utility.h:110

◆ call_all_tasks()

void call_all_tasks ( ExecutionWorkload &  workload )

Executes all tasks of a workload.

Parameters
[in]  workload  Workload to execute

Definition at line 230 of file ExecutionHelpers.cpp.

References ARM_COMPUTE_ERROR_ON, ExecutionWorkload::ctx, GraphContext::memory_managers(), and ExecutionWorkload::tasks.

Referenced by GraphManager::execute_graph().

231 {
232  ARM_COMPUTE_ERROR_ON(workload.ctx == nullptr);
233 
234  // Acquire memory for the transition buffers
235  for(auto &mm_ctx : workload.ctx->memory_managers())
236  {
237  if(mm_ctx.second.cross_group != nullptr)
238  {
239  mm_ctx.second.cross_group->acquire();
240  }
241  }
242 
243  // Execute tasks
244  for(auto &task : workload.tasks)
245  {
246  task();
247  }
248 
249  // Release memory for the transition buffers
250  for(auto &mm_ctx : workload.ctx->memory_managers())
251  {
252  if(mm_ctx.second.cross_group != nullptr)
253  {
254  mm_ctx.second.cross_group->release();
255  }
256  }
257 }
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
Definition: Error.h:466
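
Together with call_all_input_node_accessors() and call_all_output_node_accessors(), this forms one execution step: feed the inputs, run every task, then drain the outputs. The loop below is a simplified sketch in the spirit of GraphManager::execute_graph(), not its exact code; the function name run_workload() is hypothetical.

#include "arm_compute/graph/Workload.h"
#include "arm_compute/graph/detail/ExecutionHelpers.h"

using namespace arm_compute::graph;

// Hypothetical driver loop: keep executing while the output accessors ask for more data.
void run_workload(ExecutionWorkload &workload)
{
    bool keep_running = true;
    while(keep_running)
    {
        // Feed the input tensors through their accessors; stop if any of them fails.
        if(!detail::call_all_input_node_accessors(workload))
        {
            break;
        }

        // Execute all tasks; transition buffers are acquired and released inside.
        detail::call_all_tasks(workload);

        // Drain the output tensors; true means the accessors expect more data.
        keep_running = detail::call_all_output_node_accessors(workload);
    }
}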

◆ call_tensor_accessor()

void call_tensor_accessor ( Tensor *  tensor )

Calls accessor of a given tensor.

Parameters
[in]  tensor  The tensor whose accessor should be called

Definition at line 187 of file ExecutionHelpers.cpp.

References ARM_COMPUTE_ERROR_ON, and Tensor::call_accessor().

Referenced by call_all_const_node_accessors().

188 {
189  ARM_COMPUTE_ERROR_ON(!tensor);
190  tensor->call_accessor();
191 }
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
Definition: Error.h:466

◆ configure_all_nodes()

ExecutionWorkload configure_all_nodes ( Graph &  g,
GraphContext &  ctx,
const std::vector< NodeID > &  node_order 
)

Configures all nodes of a graph.

Parameters
[in,out]  g           Graph to configure the nodes of
[in]      ctx         Graph context to use
[in]      node_order  The order in which to configure the nodes
Returns
The execution workload

Definition at line 133 of file ExecutionHelpers.cpp.

References INode::assigned_target(), IDeviceBackend::configure_node(), ExecutionWorkload::ctx, BackendRegistry::get(), BackendRegistry::get_backend(), ExecutionWorkload::graph, arm_compute::graph::Input, ExecutionWorkload::inputs, arm_compute::graph::is_utility_node(), Graph::node(), Graph::nodes(), arm_compute::graph::Output, ExecutionWorkload::outputs, and ExecutionWorkload::tasks.

Referenced by GraphManager::finalize_graph().

134 {
135  ExecutionWorkload workload;
136  workload.graph = &g;
137  workload.ctx = &ctx;
138 
139  // Reserve memory for tasks
140  workload.tasks.reserve(node_order.size());
141 
142  // Create tasks
143  for(auto &node_id : node_order)
144  {
145  auto node = g.node(node_id);
146  if(node != nullptr)
147  {
148  Target assigned_target = node->assigned_target();
149  backends::IDeviceBackend &backend = backends::BackendRegistry::get().get_backend(assigned_target);
150  std::unique_ptr<IFunction> func = backend.configure_node(*node, ctx);
151  if(func != nullptr || is_utility_node(node))
152  {
153  workload.tasks.emplace_back(ExecutionTask(std::move(func), node));
154  }
155  }
156  }
157 
158  // Add inputs and outputs
159  for(auto &node : g.nodes())
160  {
161  if(node != nullptr && node->type() == NodeType::Input)
162  {
163  workload.inputs.push_back(node->output(0));
164  }
165 
166  if(node != nullptr && node->type() == NodeType::Output)
167  {
168  workload.outputs.push_back(node->input(0));
169  continue;
170  }
171  }
172 
173  return workload;
174 }
bool is_utility_node(INode *node)
Definition: Utils.h:37
Target
< Target enum
Definition: Acl.hpp:293

◆ configure_all_tensors()

void configure_all_tensors ( Graph &  g )

Configures all tensors of a graph.

Parameters
[in]  g  Graph to configure the tensors of

Definition at line 56 of file ExecutionHelpers.cpp.

References ARM_COMPUTE_ERROR_ON_MSG, IDeviceBackend::create_tensor(), BackendRegistry::get(), BackendRegistry::get_backend(), and Graph::tensors().

Referenced by GraphManager::finalize_graph().

57 {
58  auto &tensors = g.tensors();
59 
60  for(auto &tensor : tensors)
61  {
62  if(tensor && tensor->handle() == nullptr)
63  {
64  Target target = tensor->desc().target;
65  backends::IDeviceBackend &backend = backends::BackendRegistry::get().get_backend(target);
66  std::unique_ptr<ITensorHandle> handle = backend.create_tensor(*tensor);
67  ARM_COMPUTE_ERROR_ON_MSG(!handle, "Couldn't create backend handle!");
68  tensor->set_handle(std::move(handle));
69  }
70  }
71 }
#define ARM_COMPUTE_ERROR_ON_MSG(cond, msg)
Definition: Error.h:456
Target
< Target enum
Definition: Acl.hpp:293

◆ configure_transition_manager()

void configure_transition_manager ( Graph &  g,
GraphContext &  ctx,
ExecutionWorkload &  workload 
)

Configures transition manager and execution workload.

Parameters
[in]  g         Graph to configure
[in]  ctx       Graph context
[in]  workload  Workload to configure

Definition at line 236 of file CrossLayerMemoryManagerHelpers.cpp.

References MemoryManagerContext::cross_group, MemoryManagerContext::cross_mm, GraphContext::memory_management_ctx(), and ExecutionWorkload::tasks.

Referenced by GraphManager::finalize_graph().

237 {
238  // Get const tensors (un-managed)
239  std::set<ITensorHandle *> const_tensors = get_const_handles(g);
240 
241  std::vector<TaskHandles> tasks_handles;
242  TargetHandleCounter target_handle_count;
243 
244  // Count handles
245  for(auto &task : workload.tasks)
246  {
247  // Populates IO handles
248  tasks_handles.push_back(get_transition_handles(ctx, task, const_tensors));
249 
250  // Count handles
251  count_input_handles_per_target(tasks_handles.back(), target_handle_count);
252  }
253 
254  // Setup memory managers
255  for(auto &hc : target_handle_count)
256  {
257  MemoryManagerContext *mm_ctx = ctx.memory_management_ctx(hc.first);
258  if(mm_ctx != nullptr)
259  {
260  if(mm_ctx->cross_mm != nullptr && mm_ctx->cross_group != nullptr)
261  {
262  // Manage and allocate tensors
263  configure_handle_lifetime(tasks_handles, hc.second);
264  }
265  }
266  }
267 }
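
Most helpers on this page are invoked from GraphManager::finalize_graph(), which chains them into a finalization pipeline. The sketch below outlines one plausible ordering derived from the cross-references above; it omits graph mutators, backend setup and error handling, and the function name finalize_graph_sketch() is hypothetical, so treat it as an outline rather than the library's exact implementation.

#include "arm_compute/graph/Graph.h"
#include "arm_compute/graph/GraphContext.h"
#include "arm_compute/graph/Workload.h"
#include "arm_compute/graph/algorithms/TopologicalSort.h"
#include "arm_compute/graph/detail/CrossLayerMemoryManagerHelpers.h"
#include "arm_compute/graph/detail/ExecutionHelpers.h"

#include <vector>

using namespace arm_compute::graph;

// Hypothetical outline of the finalization flow (mutators and error handling omitted).
ExecutionWorkload finalize_graph_sketch(Graph &g, GraphContext &ctx)
{
    detail::configure_all_tensors(g);                 // Create backend tensor handles
    detail::validate_all_nodes(g);                    // Ask each backend to validate its nodes

    const std::vector<NodeID> order    = dfs(g);      // Topological order of the nodes
    ExecutionWorkload         workload = detail::configure_all_nodes(g, ctx, order);

    detail::allocate_const_tensors(g);                // Memory for Const/Input/Output node tensors
    detail::call_all_const_node_accessors(g);         // Fill const tensors (e.g. load weights)
    detail::prepare_all_tasks(workload);              // Prepare tasks, releasing now-unused tensors

    // Either set up cross-layer transition buffers or allocate the remaining tensors directly.
    if(ctx.config().use_transition_memory_manager)
    {
        detail::configure_transition_manager(g, ctx, workload);
    }
    else
    {
        detail::allocate_all_tensors(g);
    }
    return workload;
}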

◆ fuse_convolution_with_batch_normalization()

void arm_compute::graph::detail::fuse_convolution_with_batch_normalization ( Graph &  g,
const Edge *  output_edge 
)

Definition at line 43 of file NodeFusionMutator.cpp.

References Graph::add_connection(), Graph::add_node(), ARM_COMPUTE_ERROR_ON, ARM_COMPUTE_LOG_GRAPH_VERBOSE, arm_compute::graph::configure_tensor(), Edge::consumer(), Edge::consumer_id(), arm_compute::test::validation::conv_info, arm_compute::quantization::epsilon, arm_compute::graph::get_driving_nodes(), INode::input_edge(), NodeParams::name, Graph::node(), arm_compute::test::validation::num_groups, Edge::producer(), Edge::producer_id(), and Graph::remove_node().

Referenced by NodeFusionMutator::mutate().

44 {
45  ARM_COMPUTE_ERROR_ON(output_edge == nullptr);
46 
47  auto *conv_node = arm_compute::utils::cast::polymorphic_downcast<ConvolutionLayerNode *>(output_edge->producer());
48  auto *bn_node = arm_compute::utils::cast::polymorphic_downcast<BatchNormalizationLayerNode *>(output_edge->consumer());
49 
50  // Not fusing if number of groups is greater than 1
51  if(conv_node->num_groups() > 1)
52  {
53  return;
54  }
55 
56  ARM_COMPUTE_LOG_GRAPH_VERBOSE("Fusing convolution node with ID : " << output_edge->producer_id()
57  << " with BatchNormalization Layer node with ID : " << output_edge->consumer_id() << std::endl);
58 
59  // Prevent fusion if fused node has an output accessor
60  if(conv_node->output(0)->accessor() == nullptr)
61  {
62  const Target assigned_target = conv_node->assigned_target();
63 
64  // Extract conv inputs
65  const auto conv_input_id = conv_node->input_edge(0)->producer_id();
66  const auto conv_weights_id = conv_node->input_edge(1)->producer_id();
67  const auto conv_info = conv_node->convolution_info();
68  const auto conv_method = conv_node->convolution_method();
69  const auto num_groups = conv_node->num_groups();
70  const auto act_info = bn_node->fused_activation();
71  FastMathHint fast_math_hint = conv_node->fast_math_hint();
72 
73  // Extract bn inputs
74  const auto bn_mean_id = bn_node->input_edge(1)->producer_id();
75  const auto bn_var_id = bn_node->input_edge(2)->producer_id();
76 
77  const auto epsilon = bn_node->epsilon();
78 
79  // Create the fused node
80  const NodeID fused_id = g.add_node<FusedConvolutionBatchNormalizationNode>(epsilon, conv_info, num_groups, conv_method, fast_math_hint, act_info);
81 
82  if(conv_node->input_edge(2) != nullptr)
83  {
84  auto conv_bias_id = conv_node->input_edge(2)->producer_id();
85  g.add_connection(conv_bias_id, 0, fused_id, 2);
86  }
87 
88  // Add connections from the conv/batch_norm inputs to the fused node
89  g.add_connection(conv_input_id, 0, fused_id, 0);
90  g.add_connection(conv_weights_id, 0, fused_id, 1);
91  g.add_connection(bn_mean_id, 0, fused_id, 3);
92  g.add_connection(bn_var_id, 0, fused_id, 4);
93 
94  if(bn_node->input_edge(3) != nullptr)
95  {
96  const auto bn_beta_id = bn_node->input_edge(3)->producer_id();
97  g.add_connection(bn_beta_id, 0, fused_id, 5);
98  }
99 
100  if(bn_node->input_edge(4) != nullptr)
101  {
102  const auto bn_gamma_id = bn_node->input_edge(4)->producer_id();
103  g.add_connection(bn_gamma_id, 0, fused_id, 6);
104  }
105 
106  auto fused_node = g.node(fused_id);
107  std::vector<NodeIdxPair> bn_driving_nodes = get_driving_nodes(*bn_node);
108 
109  // Extract batch normalization node accessor if any
110  auto bn_node_accessor = bn_node->output(0)->extract_accessor();
111  auto bn_node_name = bn_node->name();
112 
113  // Remove batch normalization node
114  g.remove_node(bn_node->id());
115 
116  // Get driving nodes of batch normalization node
117  for(auto &driving_node : bn_driving_nodes)
118  {
119  g.add_connection(fused_id, 0, driving_node.node_id, driving_node.index);
120  configure_tensor(fused_node->output(0));
121  }
122  // Update fused node outputs
123  fused_node->output(0)->set_accessor(std::move(bn_node_accessor));
124  fused_node->set_assigned_target(assigned_target);
125  fused_node->set_common_node_parameters(NodeParams{ conv_node->name() + "+" + bn_node_name, assigned_target });
126 
127  // Remove convolution node
128  g.remove_node(conv_node->id());
129  }
130  else
131  {
132  ARM_COMPUTE_LOG_GRAPH_VERBOSE("Prevented fusion of convolution with batch normalization due to the presence of an output accessor\n");
133  }
134 }
void configure_tensor(Tensor *tensor)
Configures tensor.
Definition: Utils.cpp:186
std::vector< NodeIdxPair > get_driving_nodes(const INode &node)
Get the list of driving nodes of a given node.
Definition: Utils.cpp:166
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
Definition: Error.h:466
const unsigned int num_groups
Definition: Im2Col.cpp:153
Target
< Target enum
Definition: Acl.hpp:293
FastMathHint
Enable or disable fast math for Convolution layer.
Definition: Types.h:142
unsigned int NodeID
Definition: Types.h:68
#define ARM_COMPUTE_LOG_GRAPH_VERBOSE(x)
Definition: Logger.h:50

◆ fuse_depthwise_convolution_with_batch_normalization()

void arm_compute::graph::detail::fuse_depthwise_convolution_with_batch_normalization ( Graph &  g,
const Edge *  output_edge 
)

Definition at line 136 of file NodeFusionMutator.cpp.

References Graph::add_connection(), Graph::add_node(), ARM_COMPUTE_ERROR_ON, ARM_COMPUTE_LOG_GRAPH_VERBOSE, arm_compute::graph::configure_tensor(), Edge::consumer(), Edge::consumer_id(), arm_compute::test::validation::conv_info, arm_compute::quantization::epsilon, arm_compute::graph::get_driving_nodes(), INode::input_edge(), NodeParams::name, Graph::node(), Edge::producer(), Edge::producer_id(), and Graph::remove_node().

Referenced by NodeFusionMutator::mutate().

137 {
138  ARM_COMPUTE_ERROR_ON(output_edge == nullptr);
139 
140  auto *depth_conv_node = arm_compute::utils::cast::polymorphic_downcast<DepthwiseConvolutionLayerNode *>(output_edge->producer());
141  auto *bn_node = arm_compute::utils::cast::polymorphic_downcast<BatchNormalizationLayerNode *>(output_edge->consumer());
142 
143  ARM_COMPUTE_LOG_GRAPH_VERBOSE("Fusing depthwise convolution node with ID : " << output_edge->producer_id()
144  << " with BatchNormalization Layer node with ID : " << output_edge->consumer_id() << std::endl);
145 
146  // Prevent fusion if fused node has an output accessor
147  if(depth_conv_node->output(0)->accessor() == nullptr)
148  {
149  const Target assigned_target = depth_conv_node->assigned_target();
150 
151  // Extract conv inputs
152  const auto depth_conv_input_id = depth_conv_node->input_edge(0)->producer_id();
153  const auto conv_weights_id = depth_conv_node->input_edge(1)->producer_id();
154  const auto conv_info = depth_conv_node->convolution_info();
155  const auto depth_conv_method = depth_conv_node->depthwise_convolution_method();
156  const auto depth_multiplier = depth_conv_node->depth_multiplier();
157  const auto act_info = bn_node->fused_activation();
158 
159  // Extract bn inputs
160  const auto bn_mean_id = bn_node->input_edge(1)->producer_id();
161  const auto bn_var_id = bn_node->input_edge(2)->producer_id();
162  const auto bn_beta_id = bn_node->input_edge(3)->producer_id();
163  const auto bn_gamma_id = bn_node->input_edge(4)->producer_id();
164  const auto epsilon = bn_node->epsilon();
165 
166  // Create the fused node
167  const NodeID fused_id = g.add_node<FusedDepthwiseConvolutionBatchNormalizationNode>(epsilon, conv_info, depth_multiplier, depth_conv_method, act_info);
168 
169  if(depth_conv_node->input_edge(2) != nullptr)
170  {
171  const auto conv_bias_id = depth_conv_node->input_edge(2)->producer_id();
172  g.add_connection(conv_bias_id, 0, fused_id, 2);
173  }
174 
175  // Add connections from the conv/batch_norm inputs to the fused node
176  g.add_connection(depth_conv_input_id, 0, fused_id, 0);
177  g.add_connection(conv_weights_id, 0, fused_id, 1);
178  g.add_connection(bn_mean_id, 0, fused_id, 3);
179  g.add_connection(bn_var_id, 0, fused_id, 4);
180  g.add_connection(bn_beta_id, 0, fused_id, 5);
181  g.add_connection(bn_gamma_id, 0, fused_id, 6);
182 
183  auto fused_node = g.node(fused_id);
184  std::vector<NodeIdxPair> bn_driving_nodes = get_driving_nodes(*bn_node);
185 
186  // Extract batch normalization node accessor if any
187  auto bn_node_accessor = bn_node->output(0)->extract_accessor();
188  auto bn_node_name = bn_node->name();
189 
190  // Remove batch normalization node
191  g.remove_node(bn_node->id());
192 
193  // Get driving nodes of batch normalization node
194  for(auto &driving_node : bn_driving_nodes)
195  {
196  g.add_connection(fused_id, 0, driving_node.node_id, driving_node.index);
197  configure_tensor(fused_node->output(0));
198  }
199  // Update fused node outputs
200  fused_node->output(0)->set_accessor(std::move(bn_node_accessor));
201  fused_node->set_assigned_target(assigned_target);
202  fused_node->set_common_node_parameters(NodeParams{ depth_conv_node->name() + "+" + bn_node_name, assigned_target });
203 
204  // Remove convolution node
205  g.remove_node(depth_conv_node->id());
206  }
207  else
208  {
209  ARM_COMPUTE_LOG_GRAPH_VERBOSE("Prevented fusion of depthwise convolution with batch normalization due to the presence of an output accessor\n");
210  }
211 }
void configure_tensor(Tensor *tensor)
Configures tensor.
Definition: Utils.cpp:186
std::vector< NodeIdxPair > get_driving_nodes(const INode &node)
Get the list of driving nodes of a given node.
Definition: Utils.cpp:166
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
Definition: Error.h:466
Target
< Target enum
Definition: Acl.hpp:293
unsigned int NodeID
Definition: Types.h:68
#define ARM_COMPUTE_LOG_GRAPH_VERBOSE(x)
Definition: Logger.h:50

◆ fuse_layer()

void arm_compute::graph::detail::fuse_layer ( Graph &  g,
std::function< bool(INode &)> const &  prec,
const F  fuse_fcn,
Args &&...  optional_arguments 
)

Definition at line 269 of file NodeFusionMutator.cpp.

References Graph::edge(), Graph::node(), Graph::nodes(), and INode::output_edges().

270 {
271  // Note that fused nodes may be added to the end of the node list.
272  // Instead of only looping over the original list of nodes, we loop over the current node list which could be growing.
273  // This is intentional as it probes the newly added fused nodes for further fusing opportunities.
274  for(unsigned int i = 0; i < g.nodes().size(); ++i)
275  {
276  auto node = g.node(i);
277  // Check if the node is of type N and not a branching node
278  if(node && node->type() == N1::node_type && node->output_edges().size() == 1)
279  {
280  const auto output_edge_id = *node->output_edges().begin();
281  const auto output_edge = g.edge(output_edge_id);
282 
283  // Check if following node is an activation layer node
284  if((output_edge != nullptr) && (output_edge->consumer() != nullptr) && (output_edge->consumer()->type() == N2::node_type) && prec(*output_edge->producer()))
285  {
286  fuse_fcn(g, output_edge, optional_arguments...);
287  }
288  }
289  }
290 }
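
fuse_layer() is the generic driver used by NodeFusionMutator::mutate(): N1 is the producer node type, N2 the consumer node type, prec an extra precondition checked on the producer, and fuse_fcn the function that performs the rewrite. The sketch below shows the call pattern for the two batch-normalization fusions documented above; it is only meaningful in a translation unit where these internal helpers are visible (e.g. NodeFusionMutator.cpp), the real mutator applies stricter preconditions than the accept-everything lambda used here, and the function name run_bn_fusions() is hypothetical.

#include "arm_compute/graph/Graph.h"
#include "arm_compute/graph/INode.h"
#include "arm_compute/graph/nodes/Nodes.h"

using namespace arm_compute::graph;

// Sketch of driving the batch-normalization fusions over a whole graph.
void run_bn_fusions(Graph &g)
{
    // Precondition on the producer node: accept everything in this sketch.
    auto any_node = [](INode &) { return true; };

    detail::fuse_layer<ConvolutionLayerNode, BatchNormalizationLayerNode>(
        g, any_node, detail::fuse_convolution_with_batch_normalization);

    detail::fuse_layer<DepthwiseConvolutionLayerNode, BatchNormalizationLayerNode>(
        g, any_node, detail::fuse_depthwise_convolution_with_batch_normalization);
}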

◆ fuse_node_with_activation()

void arm_compute::graph::detail::fuse_node_with_activation ( Graph &  g,
const Edge *  output_edge,
const std::set< Activation > &  supported_fused_activations 
)

Definition at line 214 of file NodeFusionMutator.cpp.

References Graph::add_connection(), ARM_COMPUTE_ERROR_ON, ARM_COMPUTE_LOG_GRAPH_VERBOSE, Edge::consumer(), Edge::consumer_id(), arm_compute::graph::EltwiseLayer, arm_compute::graph::get_driving_nodes(), arm_compute::is_data_type_float(), Edge::producer(), Edge::producer_id(), and Graph::remove_node().

215 {
216  ARM_COMPUTE_ERROR_ON(output_edge == nullptr);
217 
218  auto *n_node = arm_compute::utils::cast::polymorphic_downcast<N *>(output_edge->producer());
219  auto *act_node = arm_compute::utils::cast::polymorphic_downcast<ActivationLayerNode *>(output_edge->consumer());
220 
221  ARM_COMPUTE_ERROR_ON(act_node->output(0) == nullptr || n_node->output(0) == nullptr);
222 
223  // Check if activation is supported for fusion
224  if(supported_fused_activations.count(act_node->activation_info().activation()) == 0)
225  {
226  return;
227  }
228 
229  // EltwiseLayerNode can only be fused when the data type is float
230  if(n_node->type() == NodeType::EltwiseLayer && !is_data_type_float(n_node->output(0)->desc().data_type))
231  {
232  return;
233  }
234 
235  ARM_COMPUTE_LOG_GRAPH_VERBOSE("Fusing node with ID : " << output_edge->producer_id()
236  << " with Activation Layer node with ID : " << output_edge->consumer_id() << std::endl);
237 
238  // Prevent fusion if fused node has an output accessor
239  if(n_node->output(0)->accessor() == nullptr)
240  {
241  // Get driving nodes of activation node
242  std::vector<NodeIdxPair> act_driving_nodes = get_driving_nodes(*act_node);
243 
244  // Set activation info to fused node
245  n_node->set_fused_activation(act_node->activation_info());
246 
247  // Extract activation node accessor if any
248  auto act_node_accessor = act_node->output(0)->extract_accessor();
249 
250  // Remove activation node
251  g.remove_node(act_node->id());
252 
253  // Update fused node outputs
254  for(auto &driving_node : act_driving_nodes)
255  {
256  g.add_connection(n_node->id(), 0, driving_node.node_id, driving_node.index);
257  }
258 
259  // Update accessor to fused node
260  n_node->output(0)->set_accessor(std::move(act_node_accessor));
261  }
262  else
263  {
264  ARM_COMPUTE_LOG_GRAPH_VERBOSE("Prevented fusion of node with activation due to the presence of an output accessor\n");
265  }
266 }
std::vector< NodeIdxPair > get_driving_nodes(const INode &node)
Get the list of driving nodes of a given node.
Definition: Utils.cpp:166
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
Definition: Error.h:466
#define ARM_COMPUTE_LOG_GRAPH_VERBOSE(x)
Definition: Logger.h:50
bool is_data_type_float(DataType dt)
Check if a given data type is of floating point type.
Definition: Utils.h:961
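
fuse_node_with_activation() is likewise invoked through fuse_layer(), with the producer type as template parameter N and the set of activations the backend can fuse passed through fuse_layer()'s trailing arguments. The sketch below illustrates that call pattern under the same assumptions as the previous sketch (it must live where these internal helpers are visible); the activation set and the function name fuse_conv_activations() are illustrative, not the ones NodeFusionMutator actually uses.

#include <set>

// Illustrative activation set; the real mutator's set depends on data type and target.
void fuse_conv_activations(Graph &g)
{
    const std::set<Activation> supported_acts = { Activation::RELU,
                                                  Activation::BOUNDED_RELU,
                                                  Activation::LU_BOUNDED_RELU };

    auto any_node = [](INode &) { return true; };

    // The activation set is forwarded by fuse_layer() as the optional argument of fuse_fcn.
    detail::fuse_layer<ConvolutionLayerNode, ActivationLayerNode>(
        g, any_node, detail::fuse_node_with_activation<ConvolutionLayerNode>, supported_acts);
}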

◆ prepare_all_tasks()

void prepare_all_tasks ( ExecutionWorkload &  workload )

Prepares all tasks for execution.

Parameters
[in]  workload  Workload to prepare

Definition at line 220 of file ExecutionHelpers.cpp.

References ARM_COMPUTE_ERROR_ON, ExecutionWorkload::graph, release_unused_tensors(), and ExecutionWorkload::tasks.

Referenced by GraphManager::finalize_graph().

221 {
222  ARM_COMPUTE_ERROR_ON(workload.graph == nullptr);
223  for(auto &task : workload.tasks)
224  {
225  task.prepare();
226  release_unused_tensors(*workload.graph);
227  }
228 }
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
Definition: Error.h:466
void release_unused_tensors(Graph &g)
Release the memory of all tensors that are no longer used.

◆ release_unused_tensors()

void release_unused_tensors ( Graph &  g )

Release the memory of all tensors that are no longer used.

Parameters
[in]  g  Graph to release the memory from

Definition at line 176 of file ExecutionHelpers.cpp.

References Graph::tensors().

Referenced by prepare_all_tasks().

177 {
178  for(auto &tensor : g.tensors())
179  {
180  if(tensor != nullptr && tensor->handle() != nullptr)
181  {
182  tensor->handle()->release_if_unused();
183  }
184  }
185 }

◆ validate_all_nodes()

void validate_all_nodes ( Graph &  g )

Validates all nodes.

Parameters
[in]  g  Graph to validate

Definition at line 39 of file ExecutionHelpers.cpp.

References ARM_COMPUTE_ERROR_ON_MSG, Status::error_description(), BackendRegistry::get(), BackendRegistry::get_backend(), Graph::nodes(), and IDeviceBackend::validate_node().

Referenced by GraphManager::finalize_graph().

40 {
41  auto &nodes = g.nodes();
42 
43  // Create tasks
44  for(auto &node : nodes)
45  {
46  if(node != nullptr)
47  {
48  Target assigned_target = node->assigned_target();
49  backends::IDeviceBackend &backend = backends::BackendRegistry::get().get_backend(assigned_target);
50  Status status = backend.validate_node(*node);
51  ARM_COMPUTE_ERROR_ON_MSG(!bool(status), status.error_description().c_str());
52  }
53  }
54 }
#define ARM_COMPUTE_ERROR_ON_MSG(cond, msg)
Definition: Error.h:456
Target
< Target enum
Definition: Acl.hpp:293