41 auto &nodes = g.
nodes();
44 for(
auto &node : nodes)
48 Target assigned_target = node->assigned_target();
60 for(
auto &tensor : tensors)
62 if(tensor && tensor->handle() ==
nullptr)
64 Target target = tensor->desc().target;
66 std::unique_ptr<ITensorHandle> handle = backend.
create_tensor(*tensor);
68 tensor->set_handle(std::move(handle));
75 for(
unsigned int i = 0; i < node.
num_inputs(); ++i)
78 if(tensor !=
nullptr && !tensor->
bound_edges().empty())
88 for(
unsigned int i = 0; i < node.
num_outputs(); ++i)
91 if(tensor !=
nullptr && !tensor->
bound_edges().empty())
101 for(
auto &node : g.
nodes())
124 for(
auto &tensor : tensors)
126 if(tensor && !tensor->bound_edges().empty() && tensor->handle() !=
nullptr && tensor->handle()->tensor().info()->is_resizable() && tensor->handle()->tensor().is_used())
128 tensor->handle()->allocate();
140 workload.
tasks.reserve(node_order.size());
143 for(
auto &node_id : node_order)
145 auto node = g.
node(node_id);
159 for(
auto &node : g.
nodes())
163 workload.
inputs.push_back(node->output(0));
168 workload.
outputs.push_back(node->input(0));
178 for(
auto &tensor : g.
tensors())
180 if(tensor !=
nullptr && tensor->handle() !=
nullptr)
182 tensor->handle()->release_if_unused();
195 auto &nodes = g.
nodes();
197 for(
auto &node : nodes)
199 if(node !=
nullptr && node->type() ==
NodeType::Const && node->num_outputs())
201 if(!node->output(0)->bound_edges().empty())
211 bool is_valid =
true;
214 bool valid_input = (input_tensor != nullptr) && input_tensor->call_accessor();
215 is_valid = is_valid && valid_input;
223 for(
auto &task : workload.
tasks)
237 if(mm_ctx.second.cross_group !=
nullptr)
239 mm_ctx.second.cross_group->acquire();
244 for(
auto &task : workload.
tasks)
252 if(mm_ctx.second.cross_group !=
nullptr)
254 mm_ctx.second.cross_group->release();
261 bool is_valid =
true;
264 bool valid_output = (output_tensor != nullptr) && output_tensor->call_accessor();
265 is_valid = is_valid && valid_output;
std::vector< Tensor * > inputs
Input handles.
bool call_accessor()
Calls accessor on tensor.
virtual std::unique_ptr< arm_compute::IFunction > configure_node(INode &node, GraphContext &ctx)=0
Configure a backend Node.
std::map< Target, MemoryManagerContext > & memory_managers()
Gets the memory managers map.
ITensorHandle * handle()
Backend tensor handle accessor.
void call_tensor_accessor(Tensor *tensor)
Calls accessor of a given tensor.
Target assigned_target() const
Returns assigned target for this node.
bool is_utility_node(INode *node)
size_t num_outputs() const
Returns number of outputs of the node.
void allocate_all_tensors(Graph &g)
Allocates all tensors of a graph.
IDeviceBackend & get_backend(Target target)
Get a backend from the registry.
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
void prepare_all_tasks(ExecutionWorkload &workload)
Prepares all tasks for execution.
Copyright (c) 2017-2021 Arm Limited.
virtual std::unique_ptr< ITensorHandle > create_tensor(const Tensor &tensor)=0
Create a backend Tensor.
void configure_all_tensors(Graph &g)
Configures all tensors of a graph.
std::vector< std::unique_ptr< Tensor > > & tensors()
Returns tensors of graph.
void allocate_all_input_tensors(INode &node)
Allocates all input tensors of a node.
void validate_all_nodes(Graph &g)
Validates all nodes.
std::vector< ExecutionTask > tasks
Execution workload.
void allocate_all_output_tensors(INode &node)
Allocates all output tensors of a node.
Tensor * output(size_t idx) const
Returns the tensor of a given output of the node.
#define ARM_COMPUTE_ERROR_ON_MSG(cond, msg)
void release_unused_tensors(Graph &g)
Release the memory of all unused const nodes.
virtual void allocate()=0
Allocates backend memory for the handle.
void end(TokenStream &in, bool &valid)
void call_all_const_node_accessors(Graph &g)
Call all const node accessors.
bool call_all_input_node_accessors(ExecutionWorkload &workload)
Call all input node accessors.
static BackendRegistry & get()
Gets backend registry instance.
void for_each(F &&)
Base case of for_each.
Graph * graph
Graph bound to the workload.
virtual Status validate_node(INode &node)=0
Validate a node.
ExecutionWorkload configure_all_nodes(Graph &g, GraphContext &ctx, const std::vector< NodeID > &node_order)
Configures all nodes of a graph.
const std::vector< NodeID > & nodes(NodeType type)
Returns graph nodes of a given type.
void allocate_const_tensors(Graph &g)
Allocates const tensor of a given graph.
const INode * node(NodeID id) const
Get node object given its id.
GraphContext * ctx
Graph execution context.
size_t num_inputs() const
Returns number of inputs of the node.
std::set< EdgeID > bound_edges() const
Accessor the edges that are bound with the tensor.
Device backend interface.
void call_all_tasks(ExecutionWorkload &workload)
Executes all tasks of a workload.
Tensor * input(size_t idx) const
Returns the tensor of a given input of the node.
bool call_all_output_node_accessors(ExecutionWorkload &workload)
Call all output node accessors.
std::vector< Tensor * > outputs
Output handles.
std::string error_description() const
Gets error description if any.