Compute Library 21.11 - FunctionHelpers.h
/*
 * Copyright (c) 2018-2021 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef ARM_COMPUTE_GRAPH_BACKENDS_DETAIL_FUNCTION_HELPERS_H
#define ARM_COMPUTE_GRAPH_BACKENDS_DETAIL_FUNCTION_HELPERS_H

#include "arm_compute/graph/GraphContext.h"
#include "arm_compute/graph/Logger.h"
#include "arm_compute/graph/Tensor.h"
#include "arm_compute/graph/TypePrinter.h"
#include "arm_compute/graph/Types.h"
#include "arm_compute/graph/Utils.h"
#include "arm_compute/graph/backends/FusedConvolutionBatchNormalizationFunction.h"
#include "arm_compute/graph/backends/FusedDepthwiseConvolutionBatchNormalizationFunction.h"
#include "arm_compute/graph/backends/Utils.h"
#include "arm_compute/graph/nodes/Nodes.h"

#include "arm_compute/core/Error.h"
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/ITensorInfo.h"
#include "support/Cast.h"

namespace arm_compute
{
namespace graph
{
namespace backends
{
namespace detail
{
/** Returns backing tensor of a given tensor
 *
 * @tparam TargetInfo Target information
 *
 * @param[in] tensor Tensor to extract the backing tensor from
 *
 * @return Backing tensor if present else nullptr
 */
template <typename TargetInfo>
typename TargetInfo::TensorType *get_backing_tensor(arm_compute::graph::Tensor *tensor)
{
    typename TargetInfo::TensorType *backing_tensor = nullptr;
    if(tensor != nullptr)
    {
        ARM_COMPUTE_ERROR_ON(tensor->desc().target != TargetInfo::TargetType);
        // Get backing tensor handle
        ITensorHandle *tensor_handle = tensor->handle();
        // Get backing tensor
        backing_tensor = (tensor_handle != nullptr) ? arm_compute::utils::cast::polymorphic_cast<typename TargetInfo::TensorType *>(&tensor_handle->tensor()) : nullptr;
    }

    return backing_tensor;
}
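
// Illustrative sketch, not part of the upstream header: TargetInfo is a
// backend policy type that names the target and its concrete tensor types.
// The CPU backend's policy looks roughly like the following (the exact
// definition lives in the backend sources, so treat the names as assumptions):
//
// @code
// struct NETargetInfo
// {
//     using TensorType    = arm_compute::ITensor;
//     using SrcTensorType = const arm_compute::ITensor;
//     static Target TargetType; // defined as Target::NEON
// };
// @endcode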

template <typename TargetInfo>
void validate_node(const INode &node, size_t num_expected_inputs, size_t num_expected_outputs)
{
    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Creating " << node.type()
                                  << " Target: " << TargetInfo::TargetType
                                  << " ID: " << node.id()
                                  << node.name()
                                  << std::endl);

    ARM_COMPUTE_ERROR_ON(TargetInfo::TargetType != node.assigned_target());
    ARM_COMPUTE_ERROR_ON(node.num_inputs() != num_expected_inputs);
    ARM_COMPUTE_ERROR_ON(node.num_outputs() != num_expected_outputs);
    ARM_COMPUTE_UNUSED(node, num_expected_inputs, num_expected_outputs);
}

/** Creates a backend activation layer function
 *
 * @tparam ActivationLayerFunction Backend activation function
 * @tparam TargetInfo              Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend activation layer function
 */
template <typename ActivationLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_activation_layer(ActivationLayerNode &node)
{
    validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input  = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
    const ActivationLayerInfo        act_info = node.activation_info();

    // Create function
    auto func = std::make_unique<ActivationLayerFunction>();
    func->configure(input, output, act_info);

    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Shape: " << input->info()->tensor_shape()
                               << " Activation function: " << act_info.activation()
                               << " a: " << act_info.a()
                               << " b: " << act_info.b()
                               << " InPlace: " << is_in_place_operation(input, output)
                               << std::endl);

    return std::move(func);
}
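
// Usage sketch (illustrative; assumes a backend function factory shaped like
// the CPU one): factories switch on the node type and forward the downcast
// node to these helpers, e.g.
//
// @code
// case NodeType::ActivationLayer:
//     return detail::create_activation_layer<NEActivationLayer, NETargetInfo>(
//         *polymorphic_downcast<ActivationLayerNode *>(node));
// @endcode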

/** Creates a backend argminmax layer function
 *
 * @tparam ArgMinMaxLayerFunction Backend argminmax function
 * @tparam TargetInfo             Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend argminmax layer function
 */
template <typename ArgMinMaxLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_arg_min_max_layer(ArgMinMaxLayerNode &node)
{
    validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input  = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
    const ReductionOperation op   = node.reduction_operation();
    unsigned int             axis = node.axis();

    // Create function
    auto func = std::make_unique<ArgMinMaxLayerFunction>();
    func->configure(input, axis, output, op);

    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Shape: " << input->info()->tensor_shape()
                               << " Reduction Operation: " << op
                               << " axis: " << axis
                               << std::endl);

    return std::move(func);
}

/** Create a backend batch normalization layer function
 *
 * @tparam BatchNormalizationLayerFunction Backend batch normalization function
 * @tparam TargetInfo                      Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend batch normalization layer function
 */
template <typename BatchNormalizationLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_batch_normalization_layer(BatchNormalizationLayerNode &node)
{
    validate_node<TargetInfo>(node, 5 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *mean  = get_backing_tensor<TargetInfo>(node.input(1));
    typename TargetInfo::TensorType *var   = get_backing_tensor<TargetInfo>(node.input(2));
    typename TargetInfo::TensorType *beta  = get_backing_tensor<TargetInfo>(node.input(3));
    typename TargetInfo::TensorType *gamma = get_backing_tensor<TargetInfo>(node.input(4));

    typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
    const float               epsilon   = node.epsilon();
    const ActivationLayerInfo fused_act = node.fused_activation();

    // Create and configure function
    auto func = std::make_unique<BatchNormalizationLayerFunction>();
    func->configure(input, output, mean, var, beta, gamma, epsilon, fused_act);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Shape: " << input->info()->tensor_shape()
                               << " Epsilon: " << epsilon << " "
                               << (fused_act.enabled() ? to_string(fused_act.activation()) : "")
                               << " InPlace: " << is_in_place_operation(input, output)
                               << std::endl);

    return std::move(func);
}

/** Create a backend fused convolution batch normalization layer function
 *
 * @tparam FusedLayerTypes Fused layer types
 * @tparam TargetInfo      Target-specific information
 *
 * @param[in] node Node to create the backend function for
 * @param[in] ctx  Graph context
 *
 * @return Backend fused convolution batch normalization layer function
 */
template <typename FusedLayerTypes, typename TargetInfo>
std::unique_ptr<IFunction> create_fused_convolution_batch_normalization_layer(FusedConvolutionBatchNormalizationNode &node, GraphContext &ctx)
{
    validate_node<TargetInfo>(node, 7 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input   = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
    typename TargetInfo::TensorType *biases  = get_backing_tensor<TargetInfo>(node.input(2));
    typename TargetInfo::TensorType *mean    = get_backing_tensor<TargetInfo>(node.input(3));
    typename TargetInfo::TensorType *var     = get_backing_tensor<TargetInfo>(node.input(4));
    typename TargetInfo::TensorType *beta    = get_backing_tensor<TargetInfo>(node.input(5));
    typename TargetInfo::TensorType *gamma   = get_backing_tensor<TargetInfo>(node.input(6));

    typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));

    const PadStrideInfo       conv_info  = node.convolution_info();
    const unsigned int        num_groups = node.num_groups();
    const bool                fast_math  = node.fast_math_hint() == FastMathHint::Enabled;
    const ActivationLayerInfo fused_act  = node.fused_activation();
    const float               epsilon    = node.epsilon();

    // Create and configure function (we assume that functions have been validated before creation)
    std::shared_ptr<IMemoryManager> mm = get_memory_manager(ctx, TargetInfo::TargetType);
    std::unique_ptr<IFunction>      func;
    std::string                     func_name;

    using FType = FusedConvolutionBatchNormalizationFunction<TargetInfo, FusedLayerTypes>;

    // Create and configure function
    std::tie(func, func_name) = create_named_memory_managed_function<FType>(
        std::string("FusedConvolutionBatchNormalizationLayer"), mm, input, weights, biases, output, mean, var, beta, gamma, epsilon, conv_info, num_groups, fast_math, fused_act);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Weights shape: " << weights->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << (fused_act.enabled() ? " " + to_string(fused_act.activation()) : "")
                               << std::endl);
    return std::move(func);
}
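
// Illustrative sketch, not part of the upstream header: FusedLayerTypes is a
// trait struct collecting the backend functions that the fused
// convolution + batch normalization wrapper composes. A CPU-backend collector
// would look roughly like this (names are assumptions):
//
// @code
// struct NEFusedLayerTypes
// {
//     using ConvolutionLayer          = NEConvolutionLayer;
//     using DepthwiseConvolutionLayer = NEDepthwiseConvolutionLayer;
//     using FuseBatchNormalization    = NEFuseBatchNormalization;
// };
// @endcode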

/** Create a backend fused depthwise convolution batch normalization layer function
 *
 * @tparam FusedLayerTypes Fused layer types
 * @tparam TargetInfo      Target-specific information
 *
 * @param[in] node Node to create the backend function for
 * @param[in] ctx  Graph context
 *
 * @return Backend fused depthwise convolution batch normalization layer function
 */
template <typename FusedLayerTypes, typename TargetInfo>
std::unique_ptr<IFunction> create_fused_depthwise_convolution_batch_normalization_layer(FusedDepthwiseConvolutionBatchNormalizationNode &node, GraphContext &ctx)
{
    validate_node<TargetInfo>(node, 7 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input   = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
    typename TargetInfo::TensorType *biases  = get_backing_tensor<TargetInfo>(node.input(2));
    typename TargetInfo::TensorType *mean    = get_backing_tensor<TargetInfo>(node.input(3));
    typename TargetInfo::TensorType *var     = get_backing_tensor<TargetInfo>(node.input(4));
    typename TargetInfo::TensorType *beta    = get_backing_tensor<TargetInfo>(node.input(5));
    typename TargetInfo::TensorType *gamma   = get_backing_tensor<TargetInfo>(node.input(6));

    typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));

    const PadStrideInfo       conv_info        = node.convolution_info();
    const unsigned int        depth_multiplier = node.depth_multiplier();
    const ActivationLayerInfo fused_act        = node.fused_activation();
    const float               epsilon          = node.epsilon();

    // Create and configure function (we assume that functions have been validated before creation)
    std::shared_ptr<IMemoryManager> mm = get_memory_manager(ctx, TargetInfo::TargetType);
    std::unique_ptr<IFunction>      func;
    std::string                     func_name;

    using FType = FusedDepthwiseConvolutionBatchNormalizationFunction<TargetInfo, FusedLayerTypes>;

    // Create and configure function
    std::tie(func, func_name) = create_named_memory_managed_function<FType>(
        std::string("FusedDepthwiseConvolutionBatchNormalizationLayer"), mm, input, weights, biases, output, mean, var, beta, gamma, epsilon, conv_info, depth_multiplier, fused_act);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Weights shape: " << weights->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << (fused_act.enabled() ? " " + to_string(fused_act.activation()) : "")
                               << std::endl);
    return std::move(func);
}

/** Create a backend bounding box transform layer function
 *
 * @tparam BoundingBoxTransformLayerFunction Backend bounding box transform function
 * @tparam TargetInfo                        Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend bounding box transform layer function
 */
template <typename BoundingBoxTransformLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_bounding_box_transform_layer(BoundingBoxTransformLayerNode &node)
{
    validate_node<TargetInfo>(node, 2 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input  = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *deltas = get_backing_tensor<TargetInfo>(node.input(1));
    typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
    const BoundingBoxTransformInfo bbox_info = node.info();

    // Create and configure function
    auto func = std::make_unique<BoundingBoxTransformLayerFunction>();
    func->configure(input, output, deltas, bbox_info);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Shape: " << input->info()->tensor_shape()
                               << " BoundingBox Info img W: " << bbox_info.img_width() << " "
                               << " BoundingBox Info img H: " << bbox_info.img_height() << " "
                               << std::endl);

    return std::move(func);
}

/** Create a backend channel shuffle layer function
 *
 * @tparam ChannelShuffleLayerFunction Backend channel shuffle function
 * @tparam TargetInfo                  Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend channel shuffle layer function
 */
template <typename ChannelShuffleLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_channel_shuffle_layer(ChannelShuffleLayerNode &node)
{
    validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input  = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
    const unsigned int num_groups = node.num_groups();

    // Create function
    auto func = std::make_unique<ChannelShuffleLayerFunction>();
    func->configure(input, output, num_groups);

    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Shape: " << input->info()->tensor_shape()
                               << " Num groups: " << num_groups
                               << std::endl);

    return std::move(func);
}

/** Create a backend layer concatenate function
 *
 * @tparam ConcatenateLayerFunction Backend concatenate function
 * @tparam TargetInfo               Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend concatenate layer function
 */
template <typename ConcatenateLayerFunction, typename TargetInfo>
std::unique_ptr<arm_compute::IFunction> create_concatenate_layer(ConcatenateLayerNode &node)
{
    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Creating Concatenate node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
    ARM_COMPUTE_ERROR_ON(node.num_outputs() != 1);

    // Return nullptr if depth concatenate is switched off
    if(!node.is_enabled())
    {
        return nullptr;
    }

    // Extract IO and info
    std::vector<typename TargetInfo::SrcTensorType *> inputs;
    for(unsigned int i = 0; i < node.num_inputs(); ++i)
    {
        inputs.push_back(get_backing_tensor<TargetInfo>(node.input(i)));
    }
    typename TargetInfo::TensorType *output      = get_backing_tensor<TargetInfo>(node.output(0));
    const DataLayout                 data_layout = node.output(0) != nullptr ? node.output(0)->desc().layout : DataLayout::UNKNOWN;
    const size_t                     concat_axis = get_dimension_idx(data_layout, node.concatenation_axis());

    // Create and configure function
    auto func = std::make_unique<ConcatenateLayerFunction>();
    func->configure(inputs, output, concat_axis);

    // Log info
    const bool         is_quantized = is_data_type_quantized_asymmetric(output->info()->data_type());
    std::ostringstream qss;
    if(is_quantized)
    {
        qss << " Output QuantInfo: " << output->info()->quantization_info();
    }
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << output->info()->data_type()
                               << " Shape: " << output->info()->tensor_shape()
                               << " Num Inputs: " << inputs.size()
                               << " Axis: " << concat_axis
                               << qss.str()
                               << std::endl);

    return std::move(func);
}

/** Create a backend convolution layer function
 *
 * @tparam ConvolutionLayerFunctions Backend convolution functions
 * @tparam TargetInfo                Target-specific information
 *
 * @param[in] node Node to create the backend function for
 * @param[in] ctx  Graph context
 *
 * @return Backend convolution layer function
 */
template <typename ConvolutionLayerFunctions, typename TargetInfo>
std::unique_ptr<IFunction> create_convolution_layer(ConvolutionLayerNode &node, GraphContext &ctx)
{
    validate_node<TargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input   = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
    typename TargetInfo::TensorType *biases  = get_backing_tensor<TargetInfo>(node.input(2));
    typename TargetInfo::TensorType *output  = get_backing_tensor<TargetInfo>(node.output(0));

    const bool is_quantized = is_data_type_quantized_asymmetric(input->info()->data_type());

    if(is_quantized)
    {
        biases->info()->set_data_type(DataType::S32);
    }

    const PadStrideInfo       conv_info      = node.convolution_info();
    const unsigned int        num_groups     = node.num_groups();
    const ConvolutionMethod   conv_algorithm = node.convolution_method();
    const bool                fast_math      = node.fast_math_hint() == FastMathHint::Enabled;
    const ActivationLayerInfo fused_act      = node.fused_activation();

    // Create and configure function (we assume that functions have been validated before creation)
    std::shared_ptr<IMemoryManager> mm = get_memory_manager(ctx, TargetInfo::TargetType);
    std::unique_ptr<IFunction>      func;
    std::string                     func_name;

    if(conv_algorithm == ConvolutionMethod::Winograd)
    {
        ARM_COMPUTE_ERROR_ON_MSG(num_groups != 1, "WinogradConvolutionLayer does not support grouping!");
        std::tie(func, func_name) = create_named_memory_managed_function<typename ConvolutionLayerFunctions::WinogradConvolutionLayer>(
                                        std::string("WinogradConvolutionLayer"), mm,
                                        input, weights, biases, output, conv_info, fused_act, fast_math);
    }
    else if(conv_algorithm == ConvolutionMethod::Direct)
    {
        ARM_COMPUTE_ERROR_ON_MSG(num_groups != 1, "DirectConvolutionLayer does not support grouping!");
        std::tie(func, func_name) = create_named_function<typename ConvolutionLayerFunctions::DirectConvolutionLayer>(
                                        std::string("DirectConvolutionLayer"),
                                        input, weights, biases, output, conv_info, fused_act);
    }
    else if(conv_algorithm == ConvolutionMethod::GEMM)
    {
        std::tie(func, func_name) = create_named_memory_managed_function<typename ConvolutionLayerFunctions::GEMMConvolutionLayer>(
                                        std::string("GEMMConvolutionLayer"), mm,
                                        input, weights, biases, output, conv_info,
                                        WeightsInfo(), Size2D(1U, 1U), fused_act, num_groups);
    }
    else
    {
        std::tie(func, func_name) = create_named_memory_managed_function<typename ConvolutionLayerFunctions::GenericConvolutionLayer>(
                                        std::string("GenericConvolutionLayer"), mm,
                                        input, weights, biases, output, conv_info,
                                        WeightsInfo(), Size2D(1U, 1U), fused_act, fast_math, num_groups);
    }

    // Log info
    std::ostringstream qss;
    if(is_quantized)
    {
        qss << " Input QuantInfo: " << input->info()->quantization_info()
            << " Weights QuantInfo: " << weights->info()->quantization_info()
            << " Output QuantInfo: " << output->info()->quantization_info();
    }
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << func_name
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Groups: " << num_groups
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Weights shape: " << weights->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << qss.str()
                               << (fused_act.enabled() ? " " + to_string(fused_act.activation()) : "")
                               << std::endl);
    return std::move(func);
}
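
// Illustrative sketch, not part of the upstream header: ConvolutionLayerFunctions
// is a trait struct naming one backend function per convolution method, so the
// dispatch above can select the Winograd, Direct, GEMM or generic path. A
// CPU-backend collector would look roughly like this (names are assumptions):
//
// @code
// struct NEConvolutionLayerFunctions
// {
//     using GenericConvolutionLayer  = NEConvolutionLayer;
//     using GEMMConvolutionLayer     = NEGEMMConvolutionLayer;
//     using DirectConvolutionLayer   = NEDirectConvolutionLayer;
//     using WinogradConvolutionLayer = NEWinogradConvolutionLayer;
// };
// @endcode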

/** Create a backend convolution layer function with post operator
 *
 * @tparam ConvolutionLayerFunctions Backend convolution functions
 * @tparam TargetInfo                Target-specific information
 *
 * @param[in] node Node to create the backend function for
 * @param[in] ctx  Graph context
 *
 * @return Backend convolution layer function
 */
template <typename ConvolutionLayerFunctions, typename TargetInfo>
std::unique_ptr<IFunction> create_fused_convolution_with_post_op(FusedConvolutionWithPostOpNode &node, GraphContext &ctx)
{
    validate_node<TargetInfo>(node, 4 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input   = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
    typename TargetInfo::TensorType *biases  = get_backing_tensor<TargetInfo>(node.input(2));
    typename TargetInfo::TensorType *output  = get_backing_tensor<TargetInfo>(node.output(0));

    const bool is_quantized = is_data_type_quantized_asymmetric(input->info()->data_type());

    if(is_quantized)
    {
        biases->info()->set_data_type(DataType::S32);
    }

    const PadStrideInfo       conv_info  = node.convolution_info();
    const unsigned int        num_groups = node.num_groups();
    const ActivationLayerInfo fused_act  = node.fused_activation();

    experimental::PostOpList<typename TargetInfo::TensorType *> post_ops;

    auto &post_op_info_list = node.post_op_info_list();
    for(const auto &post_op_info : post_op_info_list)
    {
        switch(post_op_info->type())
        {
            case PostOpType::Activation:
            {
                const auto act_info = utils::cast::polymorphic_downcast<const ConvPostOpInfoActivation *>(post_op_info.get());
                post_ops.template push_back_op<experimental::PostOpAct<typename TargetInfo::TensorType *>>(act_info->_act);
                break;
            }
            case PostOpType::Eltwise_Add:
            {
                typename TargetInfo::TensorType *add_input    = get_backing_tensor<TargetInfo>(node.input(3));
                const auto                       eltwise_info = utils::cast::polymorphic_downcast<const ConvPostOpInfoEltwiseAdd *>(post_op_info.get());
                post_ops.template push_back_op<experimental::PostOpEltwiseAdd<typename TargetInfo::TensorType *>>(add_input, eltwise_info->_prev_op_dst_pos, eltwise_info->_policy);
                break;
            }
            default:
            {
                ARM_COMPUTE_ERROR("Unsupported PostOpType");
            }
        }
    }

    // Create and configure function (we assume that functions have been validated before creation)
    std::shared_ptr<IMemoryManager> mm = get_memory_manager(ctx, TargetInfo::TargetType);
    std::unique_ptr<IFunction>      func;
    std::string                     func_name;

    // Fusing convolution with post ops is only supported for conv1x1, which is only implemented as gemmconv2d
    std::tie(func, func_name) = create_named_memory_managed_function<typename ConvolutionLayerFunctions::GEMMConvolutionLayer>(
                                    std::string("GEMMConvolutionLayer"), mm,
                                    input, weights, biases, output, conv_info,
                                    WeightsInfo(), Size2D(1U, 1U), fused_act, num_groups, post_ops);

    // Log info
    std::ostringstream qss;
    if(is_quantized)
    {
        qss << " Input QuantInfo: " << input->info()->quantization_info()
            << " Weights QuantInfo: " << weights->info()->quantization_info()
            << " Output QuantInfo: " << output->info()->quantization_info();
    }
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << func_name
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Groups: " << num_groups
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Weights shape: " << weights->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << qss.str()
                               << (fused_act.enabled() ? " " + to_string(fused_act.activation()) : "")
                               << std::endl);
    return std::move(func);
}

/** Create a backend deconvolution layer function
 *
 * @tparam DeconvolutionLayerFunction Backend deconvolution function
 * @tparam TargetInfo                 Target-specific information
 *
 * @param[in] node Node to create the backend function for
 * @param[in] ctx  Graph context
 *
 * @return Backend deconvolution layer function
 */
template <typename DeconvolutionLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_deconvolution_layer(DeconvolutionLayerNode &node, GraphContext &ctx)
{
    validate_node<TargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input   = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
    typename TargetInfo::TensorType *biases  = get_backing_tensor<TargetInfo>(node.input(2));
    typename TargetInfo::TensorType *output  = get_backing_tensor<TargetInfo>(node.output(0));

    const PadStrideInfo deconv_info = node.deconvolution_info();

    // Create and configure function (we assume that functions have been validated before creation)
    std::shared_ptr<IMemoryManager> mm = get_memory_manager(ctx, TargetInfo::TargetType);
    std::unique_ptr<IFunction>      func;

    std::tie(func, std::ignore) = create_named_memory_managed_function<DeconvolutionLayerFunction>(
                                      std::string(), mm,
                                      input, weights, biases, output, deconv_info);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Weights shape: " << weights->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << std::endl);
    return func;
}

/** Create a backend layer depth-wise convolution function
 *
 * @tparam DepthwiseConvolutionLayer Backend depthwise convolution function
 * @tparam TargetInfo                Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend depth-wise convolution layer function
 */
template <typename DepthwiseConvolutionLayer, typename TargetInfo>
std::unique_ptr<IFunction> create_depthwise_convolution_layer(DepthwiseConvolutionLayerNode &node)
{
    validate_node<TargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input   = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
    typename TargetInfo::TensorType *biases  = get_backing_tensor<TargetInfo>(node.input(2));
    typename TargetInfo::TensorType *output  = get_backing_tensor<TargetInfo>(node.output(0));

    const bool is_quantized = is_data_type_quantized_asymmetric(input->info()->data_type());

    if(is_quantized)
    {
        biases->info()->set_data_type(DataType::S32);
    }

    const PadStrideInfo       conv_info        = node.convolution_info();
    const unsigned int        depth_multiplier = node.depth_multiplier();
    const ActivationLayerInfo fused_act        = node.fused_activation();

    // Create and configure function (we assume that functions have been validated before creation)
    std::unique_ptr<IFunction> func;
    std::string                func_name;

    std::tie(func, func_name) = create_named_function<DepthwiseConvolutionLayer>(
                                    std::string("DepthwiseConvolutionLayer"),
                                    input, weights, biases, output, conv_info, depth_multiplier, fused_act);

    // Log info
    std::ostringstream qss;
    if(is_quantized)
    {
        qss << " Input QuantInfo: " << input->info()->quantization_info()
            << " Weights QuantInfo: " << weights->info()->quantization_info()
            << " Output QuantInfo: " << output->info()->quantization_info();
    }
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << func_name
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Weights shape: " << weights->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << " Depth multiplier: " << depth_multiplier
                               << qss.str()
                               << (fused_act.enabled() ? " " + to_string(fused_act.activation()) : "")
                               << std::endl);
    return std::move(func);
}

/** Create a backend depth to space layer function
 *
 * @tparam DepthToSpaceLayerFunction Backend depth to space function
 * @tparam TargetInfo                Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend depth to space layer function
 */
template <typename DepthToSpaceLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_depth_to_space_layer(DepthToSpaceLayerNode &node)
{
    validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input  = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));

    ARM_COMPUTE_ERROR_ON(input == nullptr);
    ARM_COMPUTE_ERROR_ON(output == nullptr);

    // Create and configure function
    auto func = std::make_unique<DepthToSpaceLayerFunction>();
    func->configure(input, output, node.block_shape());

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Block Size: " << node.block_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << std::endl);

    return std::move(func);
}

/** Create a backend dequantize layer function
 *
 * @tparam DequantizationLayerFunction Backend dequantize function
 * @tparam TargetInfo                  Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend dequantize layer function
 */
template <typename DequantizationLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_dequantization_layer(DequantizationLayerNode &node)
{
    validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input  = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));

    ARM_COMPUTE_ERROR_ON(input == nullptr);
    ARM_COMPUTE_ERROR_ON(output == nullptr);

    // Create and configure function
    auto func = std::make_unique<DequantizationLayerFunction>();
    func->configure(input, output);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Input quantization info: " << input->info()->quantization_info()
                               << " Output shape: " << output->info()->tensor_shape()
                               << std::endl);

    return std::move(func);
}
/** Create a backend detection output layer function
 *
 * @tparam DetectionOutputLayerFunction Backend detection output function
 * @tparam TargetInfo                   Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend detection output layer function
 */
template <typename DetectionOutputLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_detection_output_layer(DetectionOutputLayerNode &node)
{
    validate_node<TargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input0 = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *input1 = get_backing_tensor<TargetInfo>(node.input(1));
    typename TargetInfo::TensorType *input2 = get_backing_tensor<TargetInfo>(node.input(2));
    typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
    const DetectionOutputLayerInfo detect_info = node.detection_output_info();

    ARM_COMPUTE_ERROR_ON(input0 == nullptr);
    ARM_COMPUTE_ERROR_ON(input1 == nullptr);
    ARM_COMPUTE_ERROR_ON(input2 == nullptr);
    ARM_COMPUTE_ERROR_ON(output == nullptr);

    // Create and configure function
    auto func = std::make_unique<DetectionOutputLayerFunction>();
    func->configure(input0, input1, input2, output, detect_info);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input0->info()->data_type()
                               << " Input0 shape: " << input0->info()->tensor_shape()
                               << " Input1 shape: " << input1->info()->tensor_shape()
                               << " Input2 shape: " << input2->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << " DetectionOutputLayer info: " << detect_info
                               << std::endl);

    return std::move(func);
}

/** Create a backend detection post process layer function
 *
 * @tparam DetectionPostProcessLayerFunction Backend detection post process function
 * @tparam TargetInfo                        Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend detection post process layer function
 */
template <typename DetectionPostProcessLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_detection_post_process_layer(DetectionPostProcessLayerNode &node)
{
    validate_node<TargetInfo>(node, 3 /* expected inputs */, 4 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input0  = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *input1  = get_backing_tensor<TargetInfo>(node.input(1));
    typename TargetInfo::TensorType *input2  = get_backing_tensor<TargetInfo>(node.input(2));
    typename TargetInfo::TensorType *output0 = get_backing_tensor<TargetInfo>(node.output(0));
    typename TargetInfo::TensorType *output1 = get_backing_tensor<TargetInfo>(node.output(1));
    typename TargetInfo::TensorType *output2 = get_backing_tensor<TargetInfo>(node.output(2));
    typename TargetInfo::TensorType *output3 = get_backing_tensor<TargetInfo>(node.output(3));
    const DetectionPostProcessLayerInfo detect_info = node.detection_post_process_info();

    ARM_COMPUTE_ERROR_ON(input0 == nullptr);
    ARM_COMPUTE_ERROR_ON(input1 == nullptr);
    ARM_COMPUTE_ERROR_ON(input2 == nullptr);
    ARM_COMPUTE_ERROR_ON(output0 == nullptr);
    ARM_COMPUTE_ERROR_ON(output1 == nullptr);
    ARM_COMPUTE_ERROR_ON(output2 == nullptr);
    ARM_COMPUTE_ERROR_ON(output3 == nullptr);

    // Create and configure function
    auto func = std::make_unique<DetectionPostProcessLayerFunction>();
    func->configure(input0, input1, input2, output0, output1, output2, output3, detect_info);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input0->info()->data_type()
                               << " Input0 shape: " << input0->info()->tensor_shape()
                               << " Input1 shape: " << input1->info()->tensor_shape()
                               << " Input2 shape: " << input2->info()->tensor_shape()
                               << " Output0 shape: " << output0->info()->tensor_shape()
                               << " Output1 shape: " << output1->info()->tensor_shape()
                               << " Output2 shape: " << output2->info()->tensor_shape()
                               << " Output3 shape: " << output3->info()->tensor_shape()
                               << " DetectionPostProcessLayer info: " << detect_info
                               << std::endl);

    return std::move(func);
}

/** Create a backend element-wise operation layer function
 *
 * @tparam EltwiseFunctions Backend element-wise function
 * @tparam TargetInfo       Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend element-wise operation layer function
 */
template <typename EltwiseFunctions, typename TargetInfo>
std::unique_ptr<IFunction> create_eltwise_layer(EltwiseLayerNode &node)
{
    validate_node<TargetInfo>(node, 2 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input1 = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *input2 = get_backing_tensor<TargetInfo>(node.input(1));
    typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
    const EltwiseOperation    eltwise_op     = node.eltwise_operation();
    const ConvertPolicy       convert_policy = node.convert_policy();
    const ActivationLayerInfo act_info       = node.fused_activation();
    ARM_COMPUTE_ERROR_ON(input1 == nullptr);
    ARM_COMPUTE_ERROR_ON(input2 == nullptr);
    ARM_COMPUTE_ERROR_ON(output == nullptr);

    std::unique_ptr<IFunction> func = nullptr;
    std::string                func_name;
    if(eltwise_op == EltwiseOperation::Add)
    {
        std::tie(func, func_name) = create_named_function<typename EltwiseFunctions::Addition>(
                                        std::string("ArithmeticAddition"),
                                        input1, input2, output, convert_policy, act_info);
    }
    else if(eltwise_op == EltwiseOperation::Sub)
    {
        std::tie(func, func_name) = create_named_function<typename EltwiseFunctions::Subtraction>(
                                        std::string("ArithmeticSubtraction"),
                                        input1, input2, output, convert_policy, act_info);
    }
    else if(eltwise_op == EltwiseOperation::Mul)
    {
        std::tie(func, func_name) = create_named_function<typename EltwiseFunctions::Multiplication>(
                                        std::string("PixelWiseMultiplication"),
                                        input1, input2, output, 1.f, convert_policy, node.rounding_policy(), act_info);
    }
    else if(eltwise_op == EltwiseOperation::Max)
    {
        std::tie(func, func_name) = create_named_function<typename EltwiseFunctions::Maximum>(
                                        std::string("ElementwiseMaximum"),
                                        input1, input2, output, act_info);
    }
    else if(eltwise_op == EltwiseOperation::Div)
    {
        std::tie(func, func_name) = create_named_function<typename EltwiseFunctions::Division>(
                                        std::string("ArithmeticDivision"),
                                        input1, input2, output, act_info);
    }
    else
    {
        ARM_COMPUTE_ERROR("Unsupported element-wise operation!");
    }

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Operation: " << func_name
                               << " Data Type: " << input1->info()->data_type()
                               << " Shape: " << input1->info()->tensor_shape()
                               << std::endl);

    return std::move(func);
}
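
// Illustrative sketch, not part of the upstream header: EltwiseFunctions maps
// each EltwiseOperation branch above to a backend function. A CPU-backend
// collector would look roughly like this (names are assumptions):
//
// @code
// struct NEEltwiseFunctions
// {
//     using Addition       = NEArithmeticAddition;
//     using Subtraction    = NEArithmeticSubtraction;
//     using Multiplication = NEPixelWiseMultiplication;
//     using Maximum        = NEElementwiseMax;
//     using Division       = NEElementwiseDivision;
// };
// @endcode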

/** Create a backend unary element-wise operation layer function
 *
 * @tparam UnaryEltwiseFunctions Backend unary element-wise function
 * @tparam TargetInfo            Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend unary element-wise operation layer function
 */
template <typename UnaryEltwiseFunctions, typename TargetInfo>
std::unique_ptr<IFunction> create_unary_eltwise_layer(UnaryEltwiseLayerNode &node)
{
    validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input  = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
    const UnaryEltwiseOperation eltwise_op = node.eltwise_descriptor().op;

    ARM_COMPUTE_ERROR_ON(input == nullptr);
    ARM_COMPUTE_ERROR_ON(output == nullptr);

    std::unique_ptr<IFunction> func = nullptr;
    std::string                func_name;
    if(eltwise_op == UnaryEltwiseOperation::Exp)
    {
        std::tie(func, func_name) = create_named_function<typename UnaryEltwiseFunctions::Exp>(
                                        std::string("Exp"),
                                        input, output);
    }
    else
    {
        ARM_COMPUTE_ERROR("Unsupported unary element-wise operation!");
    }

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Operation: " << func_name
                               << " Data Type: " << input->info()->data_type()
                               << " Shape: " << input->info()->tensor_shape()
                               << std::endl);

    return std::move(func);
}
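
// Illustrative sketch, not part of the upstream header: the
// UnaryEltwiseFunctions trait currently only needs an Exp member, e.g.
// (assuming the CPU backend's exponential function):
//
// @code
// struct NEUnaryEltwiseFunctions
// {
//     using Exp = NEExpLayer;
// };
// @endcode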

/** Create a backend flatten layer function
 *
 * @tparam FlattenLayerFunction Backend flatten function
 * @tparam TargetInfo           Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend flatten layer function
 */
template <typename FlattenLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_flatten_layer(FlattenLayerNode &node)
{
    validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input  = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));

    ARM_COMPUTE_ERROR_ON(input == nullptr);
    ARM_COMPUTE_ERROR_ON(output == nullptr);

    // Create and configure function
    auto func = std::make_unique<FlattenLayerFunction>();
    func->configure(input, output);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << std::endl);

    return std::move(func);
}

/** Create a backend fully connected layer function
 *
 * @tparam FullyConnectedLayerFunction Backend fully-connected function
 * @tparam TargetInfo                  Target-specific information
 *
 * @param[in] node Node to create the backend function for
 * @param[in] ctx  Graph context
 *
 * @return Backend fully connected layer function
 */
template <typename FullyConnectedLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_fully_connected_layer(FullyConnectedLayerNode &node, GraphContext &ctx)
{
    validate_node<TargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input   = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
    typename TargetInfo::TensorType *biases  = get_backing_tensor<TargetInfo>(node.input(2));
    typename TargetInfo::TensorType *output  = get_backing_tensor<TargetInfo>(node.output(0));
    FullyConnectedLayerInfo fc_info = node.info();
    fc_info.enable_fast_math        = (node.fast_math_hint() == FastMathHint::Enabled);

    ARM_COMPUTE_ERROR_ON(input == nullptr);
    ARM_COMPUTE_ERROR_ON(weights == nullptr);
    ARM_COMPUTE_ERROR_ON(output == nullptr);

    // Create and configure function
    auto wm   = get_weights_manager(ctx, TargetInfo::TargetType);
    auto mm   = get_memory_manager(ctx, TargetInfo::TargetType);
    auto func = std::make_unique<FullyConnectedLayerFunction>(mm, wm.get());
    func->configure(input, weights, biases, output, fc_info);

    const bool is_quantized = is_data_type_quantized_asymmetric(input->info()->data_type());

    // Log info
    std::ostringstream qss;
    if(is_quantized)
    {
        qss << " Input QuantInfo: " << input->info()->quantization_info()
            << " Weights QuantInfo: " << weights->info()->quantization_info()
            << " Output QuantInfo: " << output->info()->quantization_info();
    }
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << qss.str()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Weights shape: " << weights->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << std::endl);

    return std::move(func);
}
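
// Usage note (sketch; the exact GraphContext API is an assumption): the
// memory and weights managers fetched above are only non-null when matching
// contexts were registered for the target beforehand, e.g.
//
// @code
// GraphContext ctx;
// MemoryManagerContext mm_ctx;
// mm_ctx.target = Target::NEON;
// // ... populate intra/cross memory managers and the allocator ...
// ctx.insert_memory_management_ctx(std::move(mm_ctx));
// @endcode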

/** Create a backend generate proposals layer function
 *
 * @tparam GenerateProposalsLayerFunction Backend generate proposals function
 * @tparam TargetInfo                     Target-specific information
 *
 * @param[in] node Node to create the backend function for
 * @param[in] ctx  Graph context
 *
 * @return Backend generate proposals layer function
 */
template <typename GenerateProposalsLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_generate_proposals_layer(GenerateProposalsLayerNode &node, GraphContext &ctx)
{
    validate_node<TargetInfo>(node, 3 /* expected inputs */, 3 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *scores              = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *deltas              = get_backing_tensor<TargetInfo>(node.input(1));
    typename TargetInfo::TensorType *anchors             = get_backing_tensor<TargetInfo>(node.input(2));
    typename TargetInfo::TensorType *proposals           = get_backing_tensor<TargetInfo>(node.output(0));
    typename TargetInfo::TensorType *scores_out          = get_backing_tensor<TargetInfo>(node.output(1));
    typename TargetInfo::TensorType *num_valid_proposals = get_backing_tensor<TargetInfo>(node.output(2));
    const GenerateProposalsInfo info = node.info();

    ARM_COMPUTE_ERROR_ON(scores == nullptr);
    ARM_COMPUTE_ERROR_ON(deltas == nullptr);
    ARM_COMPUTE_ERROR_ON(anchors == nullptr);
    ARM_COMPUTE_ERROR_ON(proposals == nullptr);
    ARM_COMPUTE_ERROR_ON(scores_out == nullptr);

    // Create and configure function
    auto func = std::make_unique<GenerateProposalsLayerFunction>(get_memory_manager(ctx, TargetInfo::TargetType));
    func->configure(scores, deltas, anchors, proposals, scores_out, num_valid_proposals, info);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << scores->info()->data_type()
                               << " Scores shape: " << scores->info()->tensor_shape()
                               << " Deltas shape: " << deltas->info()->tensor_shape()
                               << " Anchors shape: " << anchors->info()->tensor_shape()
                               << " Proposals shape: " << proposals->info()->tensor_shape()
                               << " Num valid proposals shape: " << num_valid_proposals->info()->tensor_shape()
                               << " Scores Out shape: " << scores_out->info()->tensor_shape()
                               << std::endl);

    return std::move(func);
}

/** Create a backend L2 normalization layer function
 *
 * @tparam L2NormalizeLayerFunction Backend L2 normalization function
 * @tparam TargetInfo               Target-specific information
 *
 * @param[in] node Node to create the backend function for
 * @param[in] ctx  Graph context
 *
 * @return Backend L2 normalization layer function
 */
template <typename L2NormalizeLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_l2_normalize_layer(L2NormalizeLayerNode &node, GraphContext &ctx)
{
    validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input  = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
    int   axis    = node.axis();
    float epsilon = node.epsilon();

    ARM_COMPUTE_ERROR_ON(input == nullptr);
    ARM_COMPUTE_ERROR_ON(output == nullptr);

    // Create and configure function
    auto mm   = get_memory_manager(ctx, TargetInfo::TargetType);
    auto func = std::make_unique<L2NormalizeLayerFunction>(mm);
    func->configure(input, output, axis, epsilon);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << " Axis: " << axis
                               << " Epsilon: " << epsilon
                               << std::endl);

    return std::move(func);
}

/** Create a backend normalization layer function
 *
 * @tparam NormalizationLayerFunction Backend normalization function
 * @tparam TargetInfo                 Target-specific information
 *
 * @param[in] node Node to create the backend function for
 * @param[in] ctx  Graph context
 *
 * @return Backend normalization layer function
 */
template <typename NormalizationLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_normalization_layer(NormalizationLayerNode &node, GraphContext &ctx)
{
    ARM_COMPUTE_UNUSED(ctx);

    validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input  = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
    const NormalizationLayerInfo norm_info = node.normalization_info();
    ARM_COMPUTE_ERROR_ON(input == nullptr);
    ARM_COMPUTE_ERROR_ON(output == nullptr);

    // Create and configure function
    auto func = std::make_unique<NormalizationLayerFunction>();
    func->configure(input, output, norm_info);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << " Normalization info: " << norm_info.type()
                               << std::endl);

    return std::move(func);
}

/** Create a backend normalize planar YUV layer function
 *
 * @tparam NormalizePlanarYUVLayerFunction Backend normalize planar YUV function
 * @tparam TargetInfo                      Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend normalize planar YUV layer function
 */
template <typename NormalizePlanarYUVLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_normalize_planar_yuv_layer(NormalizePlanarYUVLayerNode &node)
{
    validate_node<TargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input  = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *mean   = get_backing_tensor<TargetInfo>(node.input(1));
    typename TargetInfo::TensorType *std    = get_backing_tensor<TargetInfo>(node.input(2));
    typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
    ARM_COMPUTE_ERROR_ON(input == nullptr);
    ARM_COMPUTE_ERROR_ON(mean == nullptr);
    ARM_COMPUTE_ERROR_ON(std == nullptr);
    ARM_COMPUTE_ERROR_ON(output == nullptr);

    // Create and configure function
    auto func = std::make_unique<NormalizePlanarYUVLayerFunction>();
    func->configure(input, output, mean, std);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Shape: " << input->info()->tensor_shape()
                               << std::endl);

    return std::move(func);
}

/** Create a backend pad layer function
 *
 * @tparam PadLayerFunction Backend pad function
 * @tparam TargetInfo       Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend pad layer function
 */
template <typename PadLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_pad_layer(PadLayerNode &node)
{
    validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input     = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *output    = get_backing_tensor<TargetInfo>(node.output(0));
    const PaddingList               &padding   = node.padding();
    const PixelValue                 pad_value = node.pad_value();
    ARM_COMPUTE_ERROR_ON(input == nullptr);
    ARM_COMPUTE_ERROR_ON(output == nullptr);

    // Create and configure function
    auto func = std::make_unique<PadLayerFunction>();
    func->configure(input, output, padding, pad_value);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << std::endl);

    return std::move(func);
}

/** Create a backend permute layer function
 *
 * @tparam PermuteLayerFunction Backend permute function
 * @tparam TargetInfo           Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend permute layer function
 */
template <typename PermuteLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_permute_layer(PermuteLayerNode &node)
{
    validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input  = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
    const PermutationVector         &perm   = node.permutation_vector();
    ARM_COMPUTE_ERROR_ON(input == nullptr);
    ARM_COMPUTE_ERROR_ON(output == nullptr);

    // Create and configure function
    auto func = std::make_unique<PermuteLayerFunction>();
    func->configure(input, output, perm);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << " Permutation vector: " << perm
                               << std::endl);

    return std::move(func);
}
1390 
1391 /** Create a backend pooling layer function
1392  *
1393  * @tparam PoolingLayerFunction Backend pooling function
1394  * @tparam TargetInfo Target-specific information
1395  *
1396  * @param[in] node Node to create the backend function for
1397  *
1398  * @return Backend pooling layer function
1399  */
1400 template <typename PoolingLayerFunction, typename TargetInfo>
1401 std::unique_ptr<IFunction> create_pooling_layer(PoolingLayerNode &node)
1402 {
1403  validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1404 
1405  // Extract IO and info
1406  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1407  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1408  const PoolingLayerInfo pool_info = node.pooling_info();
1409  ARM_COMPUTE_ERROR_ON(input == nullptr);
1410  ARM_COMPUTE_ERROR_ON(output == nullptr);
1411 
1412  // Create and configure function
1413  auto func = std::make_unique<PoolingLayerFunction>();
1414  func->configure(input, output, pool_info);
1415 
1416  // Log info
1417  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1418  << node.name()
1419  << " Type: " << node.type()
1420  << " Target: " << TargetInfo::TargetType
1421  << " Data Type: " << input->info()->data_type()
1422  << " Input shape: " << input->info()->tensor_shape()
1423  << " Output shape: " << output->info()->tensor_shape()
1424  << " Pooling info: " << pool_info.pool_type
1425  << std::endl);
1426 
1427  return std::move(func);
1428 }
1429 
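// A minimal sketch of the PoolingLayerInfo a node might carry, with constructor
// arguments assumed from arm_compute/core/Types.h: 2x2 max pooling with stride 2
// and no padding on an NHWC tensor.
//
//   PoolingLayerInfo pool_info(PoolingType::MAX,
//                              2 /* pool size */,
//                              DataLayout::NHWC,
//                              PadStrideInfo(2, 2, 0, 0) /* stride_x, stride_y, pad_x, pad_y */);
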
1430 /** Create a backend PRelu layer function
1431  *
1432  * @tparam PReluFunction Backend PRelu function
1433  * @tparam TargetInfo Target-specific information
1434  *
1435  * @param[in] node Node to create the backend function for
1436  *
1437  * @return Backend PRelu layer function
1438  */
1439 template <typename PReluFunction, typename TargetInfo>
1440 std::unique_ptr<IFunction> create_prelu_layer(PReluLayerNode &node)
1441 {
1442  validate_node<TargetInfo>(node, 2 /* expected inputs */, 1 /* expected outputs */);
1443 
1444  // Extract IO and info
1445  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1446  typename TargetInfo::TensorType *alpha = get_backing_tensor<TargetInfo>(node.input(1));
1447  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1448  ARM_COMPUTE_ERROR_ON(input == nullptr || alpha == nullptr);
1449  ARM_COMPUTE_ERROR_ON(output == nullptr);
1450 
1451  // Create and configure function
1452  auto func = std::make_unique<PReluFunction>();
1453  func->configure(input, alpha, output);
1454 
1455  // Log info
1456  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1457  << node.name()
1458  << " Type: " << node.type()
1459  << " Target: " << TargetInfo::TargetType
1460  << " Data Type: " << input->info()->data_type()
1461  << " Input shape: " << input->info()->tensor_shape()
1462  << " Output shape: " << output->info()->tensor_shape()
1463  << std::endl);
1464 
1465  return std::move(func);
1466 }
1467 
1468 /** Create a backend print layer function
1469  *
1470  * @tparam TargetInfo Target-specific information
1471  *
1472  * @param[in] node Node to create the backend function for
1473  *
1474  * @return Backend print layer function
1475  */
1476 template <typename TargetInfo>
1477 std::unique_ptr<IFunction> create_print_layer(PrintLayerNode &node)
1478 {
1479  validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1480 
1481  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1482  ARM_COMPUTE_ERROR_ON(input == nullptr);
1483  ARM_COMPUTE_UNUSED(input);
1484 
1485  // Log info
1486  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1487  << node.name()
1488  << " Type: " << node.type()
1489  << " Target: " << TargetInfo::TargetType
1490  << " Data Type: " << input->info()->data_type()
1491  << " Input shape: " << input->info()->tensor_shape()
1492  << std::endl);
1493 
1494  return nullptr;
1495 }
1496 
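// Note that, unlike the other helpers, this one deliberately returns nullptr:
// no backend function is instantiated for a print node. A caller would
// therefore guard before queueing the result (a sketch, assuming a generic
// tasks container):
//
//   auto func = detail::create_print_layer<CLTargetInfo>(node);
//   if(func != nullptr)
//   {
//       tasks.emplace_back(std::move(func));
//   }
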
1497 /** Create a backend priorbox layer function
1498  *
1499  * @tparam PriorBoxLayerFunction Backend priorbox function
1500  * @tparam TargetInfo Target-specific information
1501  *
1502  * @param[in] node Node to create the backend function for
1503  *
1504  * @return Backend priorbox layer function
1505  */
1506 template <typename PriorBoxLayerFunction, typename TargetInfo>
1507 std::unique_ptr<IFunction> create_priorbox_layer(PriorBoxLayerNode &node)
1508 {
1509  validate_node<TargetInfo>(node, 2 /* expected inputs */, 1 /* expected outputs */);
1510 
1511  // Extract IO and info
1512  typename TargetInfo::TensorType *input0 = get_backing_tensor<TargetInfo>(node.input(0));
1513  typename TargetInfo::TensorType *input1 = get_backing_tensor<TargetInfo>(node.input(1));
1514  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1515  const PriorBoxLayerInfo prior_info = node.priorbox_info();
1516  ARM_COMPUTE_ERROR_ON(input0 == nullptr);
1517  ARM_COMPUTE_ERROR_ON(input1 == nullptr);
1518  ARM_COMPUTE_ERROR_ON(output == nullptr);
1519 
1520  // Create and configure function
1521  auto func = std::make_unique<PriorBoxLayerFunction>();
1522  func->configure(input0, input1, output, prior_info);
1523 
1524  // Log info
1525  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1526  << node.name()
1527  << " Type: " << node.type()
1528  << " Target: " << TargetInfo::TargetType
1529  << " Data Type: " << input0->info()->data_type()
1530  << " Input0 shape: " << input0->info()->tensor_shape()
1531  << " Input1 shape: " << input1->info()->tensor_shape()
1532  << " Output shape: " << output->info()->tensor_shape()
1533  << " PriorBoxLayer info: " << prior_info
1534  << std::endl);
1535 
1536  return std::move(func);
1537 }
1538 
1539 /** Create a backend quantization layer function
1540  *
1541  * @tparam QuantizationLayerFunction Backend quantization function
1542  * @tparam TargetInfo Target-specific information
1543  *
1544  * @param[in] node Node to create the backend function for
1545  *
1546  * @return Backend quantization layer function
1547  */
1548 template <typename QuantizationLayerFunction, typename TargetInfo>
1549 std::unique_ptr<IFunction> create_quantization_layer(QuantizationLayerNode &node)
1550 {
1551  validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1552 
1553  // Extract IO and info
1554  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1555  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1556  ARM_COMPUTE_ERROR_ON(input == nullptr);
1557  ARM_COMPUTE_ERROR_ON(output == nullptr);
1558 
1559  // Create and configure function
1560  auto func = std::make_unique<QuantizationLayerFunction>();
1561  func->configure(input, output);
1562 
1563  // Log info
1564  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1565  << node.name()
1566  << " Type: " << node.type()
1567  << " Target: " << TargetInfo::TargetType
1568  << " Data Type: " << input->info()->data_type()
1569  << " Input shape: " << input->info()->tensor_shape()
1570  << " Output shape: " << output->info()->tensor_shape()
1571  << std::endl);
1572 
1573  return std::move(func);
1574 }
1575 
1576 /** Create a backend reduction operation layer function
1577  *
1578  * @tparam ReductionOperationFunction Backend reduction operation function
1579  * @tparam TargetInfo Target-specific information
1580  *
1581  * @param[in] node Node to create the backend function for
1582  * @param[in] ctx Graph context
1583  *
1584  * @return Backend reduction operation layer function
1585  */
1586 template <typename ReductionOperationFunction, typename TargetInfo>
1587 std::unique_ptr<IFunction> create_reduction_operation_layer(ReductionLayerNode &node, GraphContext &ctx)
1588 {
1589  validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1590 
1591  // Extract IO and info
1592  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1593  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1594  ReductionOperation op = node.op();
1595  int axis = node.axis();
1596  bool keep_dims = node.keep_dims();
1597  ARM_COMPUTE_ERROR_ON(input == nullptr);
1598  ARM_COMPUTE_ERROR_ON(output == nullptr);
1599 
1600  // Create and configure function
1601  auto func = std::make_unique<ReductionOperationFunction>(get_memory_manager(ctx, TargetInfo::TargetType));
1602  func->configure(input, output, axis, op, keep_dims);
1603 
1604  // Log info
1605  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1606  << node.name()
1607  << " Type: " << node.type()
1608  << " Target: " << TargetInfo::TargetType
1609  << " Data Type: " << input->info()->data_type()
1610  << " Input shape: " << input->info()->tensor_shape()
1611  << " Output shape: " << output->info()->tensor_shape()
1612  << " Operation: " << op
1613  << " Axis: " << axis
1614  << " Keep dimensions:" << keep_dims
1615  << std::endl);
1616 
1617  return std::move(func);
1618 }
1619 
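// Shape semantics, for orientation: reducing a [W, H, C] input over axis 0 with
// keep_dims == false yields [H, C], while keep_dims == true keeps a singleton
// dimension and yields [1, H, C]. ReductionOperation covers operations such as
// SUM, MEAN_SUM, PROD, MIN and MAX (see Types.h).
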
1620 /** Create a backend reorg layer function
1621  *
1622  * @tparam ReorgLayerFunction Backend reorg function
1623  * @tparam TargetInfo Target-specific information
1624  *
1625  * @param[in] node Node to create the backend function for
1626  *
1627  * @return Backend reorg layer function
1628  */
1629 template <typename ReorgLayerFunction, typename TargetInfo>
1630 std::unique_ptr<IFunction> create_reorg_layer(ReorgLayerNode &node)
1631 {
1632  validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1633 
1634  // Extract IO and info
1635  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1636  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1637  ARM_COMPUTE_ERROR_ON(input == nullptr);
1638  ARM_COMPUTE_ERROR_ON(output == nullptr);
1639 
1640  // Create and configure function
1641  auto func = std::make_unique<ReorgLayerFunction>();
1642  func->configure(input, output, node.stride());
1643 
1644  // Log info
1645  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1646  << node.name()
1647  << " Type: " << node.type()
1648  << " Target: " << TargetInfo::TargetType
1649  << " Data Type: " << input->info()->data_type()
1650  << " Input shape: " << input->info()->tensor_shape()
1651  << " Output shape: " << output->info()->tensor_shape()
1652  << std::endl);
1653 
1654  return std::move(func);
1655 }
1656 
1657 /** Create a backend reshape layer function
1658  *
1659  * @tparam ReshapeLayerFunction Backend reshape function
1660  * @tparam TargetInfo Target-specific information
1661  *
1662  * @param[in] node Node to create the backend function for
1663  *
1664  * @return Backend reshape layer function
1665  */
1666 template <typename ReshapeLayerFunction, typename TargetInfo>
1667 std::unique_ptr<IFunction> create_reshape_layer(ReshapeLayerNode &node)
1668 {
1669  validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1670 
1671  // Extract IO and info
1672  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1673  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1674  ARM_COMPUTE_ERROR_ON(input == nullptr);
1675  ARM_COMPUTE_ERROR_ON(output == nullptr);
1676 
1677  // Create and configure function
1678  auto func = std::make_unique<ReshapeLayerFunction>();
1679  func->configure(input, output);
1680 
1681  // Log info
1682  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1683  << node.name()
1684  << " Type: " << node.type()
1685  << " Target: " << TargetInfo::TargetType
1686  << " Data Type: " << input->info()->data_type()
1687  << " Input shape: " << input->info()->tensor_shape()
1688  << " Output shape: " << output->info()->tensor_shape()
1689  << std::endl);
1690 
1691  return std::move(func);
1692 }
1693 
1694 /** Create a backend resize layer function
1695  *
1696  * @tparam ResizeLayerFunction Backend resize function
1697  * @tparam TargetInfo Target-specific information
1698  *
1699  * @param[in] node Node to create the backend function for
1700  *
1701  * @return Backend resize layer function
1702  */
1703 template <typename ResizeLayerFunction, typename TargetInfo>
1704 std::unique_ptr<IFunction> create_resize_layer(ResizeLayerNode &node)
1705 {
1706  validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1707 
1708  // Extract IO and info
1709  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1710  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1711  ARM_COMPUTE_ERROR_ON(input == nullptr);
1712  ARM_COMPUTE_ERROR_ON(output == nullptr);
1713  const InterpolationPolicy policy = node.policy();
1714 
1715  // Create and configure function
1716  auto func = std::make_unique<ResizeLayerFunction>();
1717  func->configure(input, output, ScaleKernelInfo{ policy, BorderMode::CONSTANT, PixelValue(), SamplingPolicy::CENTER, false, false });
1718 
1719  // Log info
1720  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1721  << node.name()
1722  << " Type: " << node.type()
1723  << " Target: " << TargetInfo::TargetType
1724  << " Data Type: " << input->info()->data_type()
1725  << " Input shape: " << input->info()->tensor_shape()
1726  << " Output shape: " << output->info()->tensor_shape()
1727  << " Interpolation: " << policy
1728  << std::endl);
1729 
1730  return std::move(func);
1731 }
1732 
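// The ScaleKernelInfo arguments above, in constructor order (as understood for
// this release): interpolation policy, border mode, constant border value,
// sampling policy, use_padding and align_corners. Only the interpolation policy
// comes from the node; the remaining fields are fixed defaults here.
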
1733 /** Create a backend ROI align layer function
1734  *
1735  * @tparam ROIAlignLayerFunction ROI Align function
1736  * @tparam TargetInfo Target-specific information
1737  *
1738  * @param[in] node Node to create the backend function for
1739  *
1740  * @return ROI Align layer function
1741  */
1742 template <typename ROIAlignLayerFunction, typename TargetInfo>
1743 std::unique_ptr<IFunction> create_roi_align_layer(ROIAlignLayerNode &node)
1744 {
1745  validate_node<TargetInfo>(node, 2 /* expected inputs */, 1 /* expected outputs */);
1746 
1747  // Extract IO and info
1748  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1749  typename TargetInfo::TensorType *rois = get_backing_tensor<TargetInfo>(node.input(1));
1750  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1751  ARM_COMPUTE_ERROR_ON(input == nullptr);
1752  ARM_COMPUTE_ERROR_ON(output == nullptr);
1753  ARM_COMPUTE_ERROR_ON(rois == nullptr);
1754 
1755  const ROIPoolingLayerInfo pool_info = node.pooling_info();
1756 
1757  // Create and configure function
1758  auto func = std::make_unique<ROIAlignLayerFunction>();
1759 
1760  func->configure(input, rois, output, pool_info);
1761 
1762  // Log info
1763  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1764  << node.name()
1765  << " Type: " << node.type()
1766  << " Target: " << TargetInfo::TargetType
1767  << " Data Type: " << input->info()->data_type()
1768  << " Input shape: " << input->info()->tensor_shape()
1769  << " Output shape: " << output->info()->tensor_shape()
1770  << " ROIs shape: " << rois->info()->tensor_shape()
1771  << " ROIPooling width: " << pool_info.pooled_width()
1772  << " ROIPooling height: " << pool_info.pooled_height()
1773  << std::endl);
1774 
1775  return std::move(func);
1776 }
1777 
1778 /** Create a backend slice layer function
1779  *
1780  * @tparam SliceLayerFunction Backend slice function
1781  * @tparam TargetInfo Target-specific information
1782  *
1783  * @param[in] node Node to create the backend function for
1784  *
1785  * @return Backend slice layer function
1786  */
1787 template <typename SliceLayerFunction, typename TargetInfo>
1788 std::unique_ptr<IFunction> create_slice_layer(SliceLayerNode &node)
1789 {
1790  validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1791 
1792  // Extract IO and info
1793  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1794  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1795  ARM_COMPUTE_ERROR_ON(input == nullptr);
1796  ARM_COMPUTE_ERROR_ON(output == nullptr);
1797 
1798  // Create and configure function
1799  auto func = std::make_unique<SliceLayerFunction>();
1800  func->configure(input, output, node.starts(), node.ends());
1801 
1802  // Log info
1803  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1804  << node.name()
1805  << " Type: " << node.type()
1806  << " Target: " << TargetInfo::TargetType
1807  << " Data Type: " << input->info()->data_type()
1808  << " Input shape: " << input->info()->tensor_shape()
1809  << " Output shape: " << output->info()->tensor_shape()
1810  << std::endl);
1811 
1812  return std::move(func);
1813 }
1814 
1815 /** Create a backend softmax layer function
1816  *
1817  * @tparam SoftmaxLayerFunction Backend softmax function
1818  * @tparam TargetInfo Target-specific information
1819  *
1820  * @param[in] node Node to create the backend function for
1821  * @param[in] ctx Graph context
1822  *
1823  * @return Backend softmax layer function
1824  */
1825 template <typename SoftmaxLayerFunction, typename TargetInfo>
1826 std::unique_ptr<IFunction> create_softmax_layer(SoftmaxLayerNode &node, GraphContext &ctx)
1827 {
1828  validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1829 
1830  // Extract IO and info
1831  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1832  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1833  const float beta = node.beta();
1834  ARM_COMPUTE_ERROR_ON(input == nullptr);
1835  ARM_COMPUTE_ERROR_ON(output == nullptr);
1836 
1837  // Create and configure function
1838  auto func = std::make_unique<SoftmaxLayerFunction>(get_memory_manager(ctx, TargetInfo::TargetType));
1839  func->configure(input, output, beta);
1840 
1841  // Log info
1842  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1843  << node.name()
1844  << " Type: " << node.type()
1845  << " Target: " << TargetInfo::TargetType
1846  << " Data Type: " << input->info()->data_type()
1847  << " Input shape: " << input->info()->tensor_shape()
1848  << " Output shape: " << output->info()->tensor_shape()
1849  << std::endl);
1850 
1851  return std::move(func);
1852 }
1853 
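// The beta parameter scales the logits before normalisation, i.e. the
// configured function computes
//
//   y_i = exp(beta * x_i) / sum_j exp(beta * x_j)
//
// so beta == 1.f reduces to the standard softmax.
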
1854 /** Create a backend stack layer function
1855  *
1856  * @tparam StackLayerFunction Backend stack function
1857  * @tparam TargetInfo Target-specific information
1858  *
1859  * @param[in] node Node to create the backend function for
1860  *
1861  * @return Backend stack layer function
1862  */
1863 template <typename StackLayerFunction, typename TargetInfo>
1864 std::unique_ptr<arm_compute::IFunction> create_stack_layer(StackLayerNode &node)
1865 {
1866  ARM_COMPUTE_LOG_GRAPH_VERBOSE("Creating Stack node with ID: " << node.id() << " and Name: " << node.name() << std::endl);
1867  ARM_COMPUTE_ERROR_ON(node.num_outputs() != 1);
1868 
1869  // Extract IO and info
1870  std::vector<typename TargetInfo::TensorType *> inputs;
1871  for(unsigned int i = 0; i < node.num_inputs(); ++i)
1872  {
1873  inputs.push_back(get_backing_tensor<TargetInfo>(node.input(i)));
1874  }
1875  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1876  const int axis = node.axis();
1877 
1878  // Create and configure function
1879  auto func = std::make_unique<StackLayerFunction>();
1880  func->configure(inputs, axis, output);
1881 
1882  // Log info
1883  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1884  << node.name()
1885  << " Type: " << node.type()
1886  << " Target: " << TargetInfo::TargetType
1887  << " Data Type: " << output->info()->data_type()
1888  << " Inputs shape: " << inputs[0]->info()->tensor_shape()
1889  << " Output shape: " << output->info()->tensor_shape()
1890  << " Num Inputs: " << inputs.size()
1891  << " Axis: " << axis
1892  << std::endl);
1893 
1894  return std::move(func);
1895 }
1896 
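// Shape sketch: stacking N inputs of identical shape inserts a new dimension of
// size N at position 'axis'. For example, four [W, H] inputs stacked with
// axis == 0 would produce a [4, W, H] output (assuming arm_compute's
// innermost-first dimension ordering).
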
1897 /** Create a backend strided slice layer function
1898  *
1899  * @tparam StridedSliceLayerFunction Backend strided slice function
1900  * @tparam TargetInfo Target-specific information
1901  *
1902  * @param[in] node Node to create the backend function for
1903  *
1904  * @return Backend strided slice layer function
1905  */
1906 template <typename StridedSliceLayerFunction, typename TargetInfo>
1907 std::unique_ptr<IFunction> create_strided_slice_layer(StridedSliceLayerNode &node)
1908 {
1909  validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1910 
1911  // Extract IO and info
1912  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1913  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1914  Coordinates starts = node.starts();
1915  Coordinates ends = node.ends();
1916  BiStrides strides = node.strides();
1917  StridedSliceLayerInfo info = node.strided_slice_info();
1918 
1919  ARM_COMPUTE_ERROR_ON(input == nullptr);
1920  ARM_COMPUTE_ERROR_ON(output == nullptr);
1921 
1922  // Create and configure function
1923  auto func = std::make_unique<StridedSliceLayerFunction>();
1924  func->configure(input, output, starts, ends, strides, info.begin_mask(), info.end_mask(), info.shrink_axis_mask());
1925 
1926  // Log info
1927  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1928  << node.name()
1929  << " Type: " << node.type()
1930  << " Target: " << TargetInfo::TargetType
1931  << " Data Type: " << input->info()->data_type()
1932  << " Input shape: " << input->info()->tensor_shape()
1933  << " Output shape: " << output->info()->tensor_shape()
1934  << std::endl);
1935 
1936  return std::move(func);
1937 }
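// Mask semantics, for orientation (TensorFlow-style, as used by the strided
// slice kernels): if bit i of begin_mask is set, starts[i] is ignored and the
// fullest possible range is used for dimension i; end_mask does the same for
// ends[i]; a set bit i in shrink_axis_mask drops dimension i from the output.
// A sketch that takes every other element of the innermost dimension:
//
//   Coordinates starts(0), ends(0);
//   BiStrides   strides(2);
//   // with begin_mask = end_mask = 1, the full extent of dimension 0 is sliced
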
1938 } // namespace detail
1939 } // namespace backends
1940 } // namespace graph
1941 } // namespace arm_compute
1942 
1943 #endif /* ARM_COMPUTE_GRAPH_BACKENDS_DETAIL_FUNCTION_HELPERS_H */