Compute Library
 20.08
FunctionHelpers.h
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2018-2020 Arm Limited.
3  *
4  * SPDX-License-Identifier: MIT
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a copy
7  * of this software and associated documentation files (the "Software"), to
8  * deal in the Software without restriction, including without limitation the
9  * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10  * sell copies of the Software, and to permit persons to whom the Software is
11  * furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in all
14  * copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22  * SOFTWARE.
23  */
24 #ifndef ARM_COMPUTE_GRAPH_BACKENDS_DETAIL_FUNCTION_HELPERS_H
25 #define ARM_COMPUTE_GRAPH_BACKENDS_DETAIL_FUNCTION_HELPERS_H
26 
36 
#include "arm_compute/core/Error.h"
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/ITensorInfo.h"
#include "arm_compute/graph/Logger.h"
#include "arm_compute/graph/Tensor.h"
#include "arm_compute/graph/TypePrinter.h"
#include "arm_compute/graph/Types.h"
#include "arm_compute/graph/Utils.h"
#include "arm_compute/graph/backends/FusedConvolutionBatchNormalizationFunction.h"
#include "arm_compute/graph/backends/FusedDepthwiseConvolutionBatchNormalizationFunction.h"
#include "arm_compute/graph/backends/Utils.h"
#include "arm_compute/graph/nodes/Nodes.h"

#include "support/Cast.h"
41 
42 namespace arm_compute
43 {
44 namespace graph
45 {
46 namespace backends
47 {
48 namespace detail
49 {
// Address rule DR-9R5 (1579. Return by converting move constructor)
//
// Pre-5 GCC cannot implicitly convert a returned std::unique_ptr<Derived> to
// std::unique_ptr<Base> without an explicit std::move; newer GCC and clang can,
// and an explicit std::move there would inhibit copy elision. Hence the split.
#if defined(__clang__) || (defined(__GNUC__) && (__GNUC__ >= 5))
#define RETURN_UNIQUE_PTR(x) (x)
#else /* defined(__clang__) || (defined(__GNUC__) && (__GNUC__ >= 5)) */
#define RETURN_UNIQUE_PTR(x) (std::move(x))
#endif /* defined(__clang__) || (defined(__GNUC__) && (__GNUC__ >= 5)) */
56 
57 /** Returns backing tensor of a given tensor
58  *
59  * @tparam TargetInfo Target information
60  *
61  * @param[in] tensor Tensor to extract the backing tensor from
62  *
63  * @return Backing tensor if present else nullptr
64  */
65 template <typename TargetInfo>
67 {
68  typename TargetInfo::TensorType *backing_tensor = nullptr;
69  if(tensor != nullptr)
70  {
71  ARM_COMPUTE_ERROR_ON(tensor->desc().target != TargetInfo::TargetType);
72  // Get backing tensor handle
73  ITensorHandle *tensor_handle = tensor->handle();
74  // Get backing tensor
75  backing_tensor = (tensor_handle != nullptr) ? arm_compute::utils::cast::polymorphic_cast<typename TargetInfo::TensorType *>(&tensor_handle->tensor()) : nullptr;
76  }
77 
78  return backing_tensor;
79 }
80 
/** Logs the creation of a node and validates its topology against the expected one
 *
 * @tparam TargetInfo Target-specific information
 *
 * @param[in] node                 Node being created
 * @param[in] num_expected_inputs  Number of inputs this node type must have
 * @param[in] num_expected_outputs Number of outputs this node type must have
 */
template <typename TargetInfo>
void validate_node(const INode &node, size_t num_expected_inputs, size_t num_expected_outputs)
{
    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Creating " << node.type()
                                  << " Target: " << TargetInfo::TargetType
                                  << " ID: " << node.id()
                                  << node.name()
                                  << std::endl);

    // The assertions below compile away in release builds; UNUSED silences
    // the resulting unused-parameter warnings in that configuration.
    ARM_COMPUTE_ERROR_ON(TargetInfo::TargetType != node.assigned_target());
    ARM_COMPUTE_ERROR_ON(node.num_inputs() != num_expected_inputs);
    ARM_COMPUTE_ERROR_ON(node.num_outputs() != num_expected_outputs);
    ARM_COMPUTE_UNUSED(node, num_expected_inputs, num_expected_outputs);
}
95 
96 /** Creates a backend activation layer function
97  *
98  * @tparam ActivationLayerFunction Backend activation function
99  * @tparam TargetInfo Target-specific information
100  *
101  * @param[in] node Node to create the backend function for
102  *
103  * @return Backend activation layer function
104  */
105 template <typename ActivationLayerFunction, typename TargetInfo>
106 std::unique_ptr<IFunction> create_activation_layer(ActivationLayerNode &node)
107 {
108  validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
109 
110  // Extract IO and info
111  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
112  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
113  const ActivationLayerInfo act_info = node.activation_info();
114 
115  // Create function
116  auto func = support::cpp14::make_unique<ActivationLayerFunction>();
117  func->configure(input, output, act_info);
118 
119  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
120  << node.name()
121  << " Type: " << node.type()
122  << " Target: " << TargetInfo::TargetType
123  << " Data Type: " << input->info()->data_type()
124  << " Shape: " << input->info()->tensor_shape()
125  << " Activation function: " << act_info.activation()
126  << " a: " << act_info.a()
127  << " b: " << act_info.b()
128  << " InPlace : " << is_in_place_operation(input, output)
129  << std::endl);
130 
131  return RETURN_UNIQUE_PTR(func);
132 }
133 
134 /** Create a backend batch normalization layer function
135  *
136  * @tparam BatchNormalizationLayerFunction Backend batch normalization function
137  * @tparam TargetInfo Target-specific information
138  *
139  * @param[in] node Node to create the backend function for
140  *
141  * @return Backend batch normalization layer function
142  */
143 template <typename BatchNormalizationLayerFunction, typename TargetInfo>
145 {
146  validate_node<TargetInfo>(node, 5 /* expected inputs */, 1 /* expected outputs */);
147 
148  // Extract IO and info
149  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
150  typename TargetInfo::TensorType *mean = get_backing_tensor<TargetInfo>(node.input(1));
151  typename TargetInfo::TensorType *var = get_backing_tensor<TargetInfo>(node.input(2));
152  typename TargetInfo::TensorType *beta = get_backing_tensor<TargetInfo>(node.input(3));
153  typename TargetInfo::TensorType *gamma = get_backing_tensor<TargetInfo>(node.input(4));
154 
155  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
156  const float epsilon = node.epsilon();
157  const ActivationLayerInfo fused_act = node.fused_activation();
158 
159  // Create and configure function
160  auto func = support::cpp14::make_unique<BatchNormalizationLayerFunction>();
161  func->configure(input, output, mean, var, beta, gamma, epsilon, fused_act);
162 
163  // Log info
164  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
165  << node.name()
166  << " Type: " << node.type()
167  << " Target: " << TargetInfo::TargetType
168  << " Data Type: " << input->info()->data_type()
169  << " Shape: " << input->info()->tensor_shape()
170  << " Epsilon: " << epsilon << " "
171  << (fused_act.enabled() ? to_string(fused_act.activation()) : "")
172  << " InPlace: " << is_in_place_operation(input, output)
173  << std::endl);
174 
175  return RETURN_UNIQUE_PTR(func);
176 }
177 
178 /** Create a backend batch normalization layer function
179  *
180  * @tparam BatchNormalizationLayerFunction Backend batch normalization function
181  * @tparam TargetInfo Target-specific information
182  *
183  * @param[in] node Node to create the backend function for
184  * @param[in] ctx Graph context
185  *
186  * @return Backend batch normalization layer function
187  */
188 template <typename FusedLayerTypes, typename TargetInfo>
190 {
191  validate_node<TargetInfo>(node, 7 /* expected inputs */, 1 /* expected outputs */);
192 
193  // Extract IO and info
194  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
195  typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
196  typename TargetInfo::TensorType *biases = get_backing_tensor<TargetInfo>(node.input(2));
197  typename TargetInfo::TensorType *mean = get_backing_tensor<TargetInfo>(node.input(3));
198  typename TargetInfo::TensorType *var = get_backing_tensor<TargetInfo>(node.input(4));
199  typename TargetInfo::TensorType *beta = get_backing_tensor<TargetInfo>(node.input(5));
200  typename TargetInfo::TensorType *gamma = get_backing_tensor<TargetInfo>(node.input(6));
201 
202  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
203 
205  const unsigned int num_groups = node.num_groups();
206  const bool fast_math = node.fast_math_hint() == FastMathHint::Enabled;
207  const ActivationLayerInfo fused_act = node.fused_activation();
208  const float epsilon = node.epsilon();
209 
210  // Create and configure function (we assume that functions have been validated before creation)
211  std::shared_ptr<IMemoryManager> mm = get_memory_manager(ctx, TargetInfo::TargetType);
212  std::unique_ptr<IFunction> func;
213  std::string func_name;
214 
216 
217  // Create and configure function
218  std::tie(func, func_name) = create_named_memory_managed_function<FType>(
219  std::string("FusedConvolutionBatchNormalizationLayer"), mm, input, weights, biases, output, mean, var, beta, gamma, epsilon, conv_info, num_groups, fast_math, fused_act);
220 
221  // Log info
222  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
223  << node.name()
224  << " Type: " << node.type()
225  << " Target: " << TargetInfo::TargetType
226  << " Data Type: " << input->info()->data_type()
227  << " Input shape: " << input->info()->tensor_shape()
228  << " Weights shape: " << weights->info()->tensor_shape()
229  << " Output shape: " << output->info()->tensor_shape()
230  << (fused_act.enabled() ? " " + to_string(fused_act.activation()) : "")
231  << std::endl);
232  return RETURN_UNIQUE_PTR(func);
233 }
234 
235 /** Create a backend fused depthwise convolution batch normalization layer function
236  *
237  * @tparam FusedLayerTypes Fused layer types
238  * @tparam TargetInfo Target-specific information
239  *
240  * @param[in] node Node to create the backend function for
241  * @param[in] ctx Graph context
242  *
243  * @return Backend fused depthwise convolution batch normalization layer function
244  */
245 template <typename FusedLayerTypes, typename TargetInfo>
247 {
248  validate_node<TargetInfo>(node, 7 /* expected inputs */, 1 /* expected outputs */);
249 
250  // Extract IO and info
251  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
252  typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
253  typename TargetInfo::TensorType *biases = get_backing_tensor<TargetInfo>(node.input(2));
254  typename TargetInfo::TensorType *mean = get_backing_tensor<TargetInfo>(node.input(3));
255  typename TargetInfo::TensorType *var = get_backing_tensor<TargetInfo>(node.input(4));
256  typename TargetInfo::TensorType *beta = get_backing_tensor<TargetInfo>(node.input(5));
257  typename TargetInfo::TensorType *gamma = get_backing_tensor<TargetInfo>(node.input(6));
258 
259  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
260 
262  const unsigned int depth_multiplier = node.depth_multiplier();
263  const ActivationLayerInfo fused_act = node.fused_activation();
264  const float epsilon = node.epsilon();
265 
266  // Create and configure function (we assume that functions have been validated before creation)
267  std::shared_ptr<IMemoryManager> mm = get_memory_manager(ctx, TargetInfo::TargetType);
268  std::unique_ptr<IFunction> func;
269  std::string func_name;
270 
272 
273  // Create and configure function
274  std::tie(func, func_name) = create_named_memory_managed_function<FType>(
275  std::string("FusedDepthwiseConvolutionBatchNormalizationLayer"), mm, input, weights, biases, output, mean, var, beta, gamma, epsilon, conv_info, depth_multiplier, fused_act);
276 
277  // Log info
278  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
279  << node.name()
280  << " Type: " << node.type()
281  << " Target: " << TargetInfo::TargetType
282  << " Data Type: " << input->info()->data_type()
283  << " Input shape: " << input->info()->tensor_shape()
284  << " Weights shape: " << weights->info()->tensor_shape()
285  << " Output shape: " << output->info()->tensor_shape()
286  << (fused_act.enabled() ? " " + to_string(fused_act.activation()) : "")
287  << std::endl);
288  return RETURN_UNIQUE_PTR(func);
289 }
290 
291 /** Create a backend bounding box transform layer function
292  *
293  * @tparam BoundingBoxTransformLayerFunction Backend bounding box transform function
294  * @tparam TargetInfo Target-specific information
295  *
296  * @param[in] node Node to create the backend function for
297  *
298  * @return Backend bounding box transform layer function
299  */
300 template <typename BoundingBoxTransformLayerFunction, typename TargetInfo>
302 {
303  validate_node<TargetInfo>(node, 2 /* expected inputs */, 1 /* expected outputs */);
304 
305  // Extract IO and info
306  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
307  typename TargetInfo::TensorType *deltas = get_backing_tensor<TargetInfo>(node.input(1));
308  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
309  const BoundingBoxTransformInfo bbox_info = node.info();
310 
311  // Create and configure function
312  auto func = support::cpp14::make_unique<BoundingBoxTransformLayerFunction>();
313  func->configure(input, output, deltas, bbox_info);
314 
315  // Log info
316  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
317  << node.name()
318  << " Type: " << node.type()
319  << " Target: " << TargetInfo::TargetType
320  << " Data Type: " << input->info()->data_type()
321  << " Shape: " << input->info()->tensor_shape()
322  << " BoundingBox Info img W: " << bbox_info.img_width() << " "
323  << " BoundingBox Info img H: " << bbox_info.img_height() << " "
324  << std::endl);
325 
326  return RETURN_UNIQUE_PTR(func);
327 }
328 
329 /** Create a backend channel shuffle layer function
330  *
331  * @tparam ChannelShuffleLayerFunction Backend channel shuffle function
332  * @tparam TargetInfo Target-specific information
333  *
334  * @param[in] node Node to create the backend function for
335  *
336  * @return Backend channel shuffle layer function
337  */
338 template <typename ChannelShuffleLayerFunction, typename TargetInfo>
339 std::unique_ptr<IFunction> create_channel_shuffle_layer(ChannelShuffleLayerNode &node)
340 {
341  validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
342 
343  // Extract IO and info
344  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
345  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
346  const unsigned int num_groups = node.num_groups();
347 
348  // Create function
349  auto func = support::cpp14::make_unique<ChannelShuffleLayerFunction>();
350  func->configure(input, output, num_groups);
351 
352  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
353  << node.name()
354  << " Type: " << node.type()
355  << " Target: " << TargetInfo::TargetType
356  << " Data Type: " << input->info()->data_type()
357  << " Shape: " << input->info()->tensor_shape()
358  << " Num groups: " << num_groups
359  << std::endl);
360 
361  return RETURN_UNIQUE_PTR(func);
362 }
363 
364 /** Create a backend layer concatenate function
365  *
366  * @tparam ConcatenateLayerFunction Backend concatenate function
367  * @tparam TargetInfo Target-specific information
368  *
369  * @param[in] node Node to create the backend function for
370  *
371  * @return Backend concatenate layer function
372  */
373 template <typename ConcatenateLayerFunction, typename TargetInfo>
374 std::unique_ptr<arm_compute::IFunction> create_concatenate_layer(ConcatenateLayerNode &node)
375 {
376  ARM_COMPUTE_LOG_GRAPH_VERBOSE("Creating Concatenate node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
377  ARM_COMPUTE_ERROR_ON(node.num_outputs() != 1);
378 
379  // Return nullptr if depth concatenate is switched off
380  if(!node.is_enabled())
381  {
382  return nullptr;
383  }
384 
385  // Extract IO and info
386  std::vector<typename TargetInfo::SrcTensorType *> inputs;
387  for(unsigned int i = 0; i < node.num_inputs(); ++i)
388  {
389  inputs.push_back(get_backing_tensor<TargetInfo>(node.input(i)));
390  }
391  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
392  const DataLayout data_layout = node.output(0) != nullptr ? node.output(0)->desc().layout : DataLayout::UNKNOWN;
393  const size_t concat_axis = get_dimension_idx(data_layout, node.concatenation_axis());
394 
395  // Create and configure function
396  auto func = support::cpp14::make_unique<ConcatenateLayerFunction>();
397  func->configure(inputs, output, concat_axis);
398 
399  // Log info
400  const bool is_quantized = is_data_type_quantized_asymmetric(output->info()->data_type());
401  std::ostringstream qss;
402  if(is_quantized)
403  {
404  qss << " Output QuantInfo: " << output->info()->quantization_info();
405  }
406  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
407  << node.name()
408  << " Type: " << node.type()
409  << " Target: " << TargetInfo::TargetType
410  << " Data Type: " << output->info()->data_type()
411  << " Shape: " << output->info()->tensor_shape()
412  << " Num Inputs: " << inputs.size()
413  << " Axis: " << concat_axis
414  << qss.str()
415  << std::endl);
416 
417  return RETURN_UNIQUE_PTR(func);
418 }
419 
420 /** Create a backend convolution layer function
421  *
422  * @tparam ConvolutionLayerFunctions Backend convolution functions
423  * @tparam TargetInfo Target-specific information
424  *
425  * @param[in] node Node to create the backend function for
426  * @param[in] ctx Graph context
427  *
428  * @return Backend convolution layer function
429  */
430 template <typename ConvolutionLayerFunctions, typename TargetInfo>
431 std::unique_ptr<IFunction> create_convolution_layer(ConvolutionLayerNode &node, GraphContext &ctx)
432 {
433  validate_node<TargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);
434 
435  // Extract IO and info
436  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
437  typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
438  typename TargetInfo::TensorType *biases = get_backing_tensor<TargetInfo>(node.input(2));
439  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
440 
441  const bool is_quantized = is_data_type_quantized_asymmetric(input->info()->data_type());
442 
443  if(is_quantized)
444  {
445  biases->info()->set_data_type(DataType::S32);
446  }
447 
449  const unsigned int num_groups = node.num_groups();
450  const ConvolutionMethod conv_algorithm = node.convolution_method();
451  const bool fast_math = node.fast_math_hint() == FastMathHint::Enabled;
452  const ActivationLayerInfo fused_act = node.fused_activation();
453 
454  // Create and configure function (we assume that functions have been validated before creation)
455  std::shared_ptr<IMemoryManager> mm = get_memory_manager(ctx, TargetInfo::TargetType);
456  std::unique_ptr<IFunction> func;
457  std::string func_name;
458 
459  if(conv_algorithm == ConvolutionMethod::Winograd)
460  {
461  ARM_COMPUTE_ERROR_ON_MSG(num_groups != 1, "WinogradConvolutionLayer does not support grouping!");
462  std::tie(func, func_name) = create_named_memory_managed_function<typename ConvolutionLayerFunctions::WinogradConvolutionLayer>(
463  std::string("WinogradConvolutionLayer"), mm,
464  input, weights, biases, output, conv_info, fused_act, fast_math);
465  }
466  else if(conv_algorithm == ConvolutionMethod::Direct)
467  {
468  ARM_COMPUTE_ERROR_ON_MSG(num_groups != 1, "DirectConvolutionLayer does not support grouping!");
469  std::tie(func, func_name) = create_named_function<typename ConvolutionLayerFunctions::DirectConvolutionLayer>(
470  std::string("DirectConvolutionLayer"),
471  input, weights, biases, output, conv_info, fused_act);
472  }
473  else if(conv_algorithm == ConvolutionMethod::GEMM)
474  {
475  std::tie(func, func_name) = create_named_memory_managed_function<typename ConvolutionLayerFunctions::GEMMConvolutionLayer>(
476  std::string("GEMMConvolutionLayer"), mm,
477  input, weights, biases, output, conv_info,
478  WeightsInfo(), Size2D(1U, 1U), fused_act, num_groups);
479  }
480  else
481  {
482  std::tie(func, func_name) = create_named_memory_managed_function<typename ConvolutionLayerFunctions::GenericConvolutionLayer>(
483  std::string("GenericConvolutionLayer"), mm,
484  input, weights, biases, output, conv_info,
485  WeightsInfo(), Size2D(1U, 1U), fused_act, fast_math, num_groups);
486  }
487 
488  // Log info
489  std::ostringstream qss;
490  if(is_quantized)
491  {
492  qss << " Input QuantInfo: " << input->info()->quantization_info()
493  << " Weights QuantInfo: " << weights->info()->quantization_info()
494  << " Output QuantInfo: " << output->info()->quantization_info();
495  }
496  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
497  << node.name()
498  << " Type: " << func_name
499  << " Target: " << TargetInfo::TargetType
500  << " Data Type: " << input->info()->data_type()
501  << " Groups: " << num_groups
502  << " Input shape: " << input->info()->tensor_shape()
503  << " Weights shape: " << weights->info()->tensor_shape()
504  << " Output shape: " << output->info()->tensor_shape()
505  << qss.str()
506  << (fused_act.enabled() ? " " + to_string(fused_act.activation()) : "")
507  << std::endl);
508  return RETURN_UNIQUE_PTR(func);
509 }
510 
511 /** Create a backend deconvolution layer function
512  *
513  * @tparam DeconvolutionLayerFunction Backend deconvolution function
514  * @tparam TargetInfo Target-specific information
515  *
516  * @param[in] node Node to create the backend function for
517  * @param[in] ctx Graph context
518  *
519  * @return Backend deconvolution layer function
520  */
521 template <typename DeconvolutionLayerFunction, typename TargetInfo>
522 std::unique_ptr<IFunction> create_deconvolution_layer(DeconvolutionLayerNode &node, GraphContext &ctx)
523 {
524  validate_node<TargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);
525 
526  // Extract IO and info
527  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
528  typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
529  typename TargetInfo::TensorType *biases = get_backing_tensor<TargetInfo>(node.input(2));
530  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
531 
532  const PadStrideInfo deconv_info = node.deconvolution_info();
533 
534  // Create and configure function (we assume that functions have been validated before creation)
535  std::shared_ptr<IMemoryManager> mm = get_memory_manager(ctx, TargetInfo::TargetType);
536  std::unique_ptr<IFunction> func;
537 
538  std::tie(func, std::ignore) = create_named_memory_managed_function<DeconvolutionLayerFunction>(
539  std::string(), mm,
540  input, weights, biases, output, deconv_info);
541 
542  // Log info
543  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
544  << node.name()
545  << " Type: " << node.type()
546  << " Target: " << TargetInfo::TargetType
547  << " Data Type: " << input->info()->data_type()
548  << " Input shape: " << input->info()->tensor_shape()
549  << " Weights shape: " << weights->info()->tensor_shape()
550  << " Output shape: " << output->info()->tensor_shape()
551  << std::endl);
552  return func;
553 }
554 
555 /** Create a backend layer depth-wise convolution function
556  *
557  * @tparam DepthwiseConvolutionLayerFunctions Backend depthwise convolution function
558  * @tparam TargetInfo Target-specific information
559  *
560  * @param[in] node Node to create the backend function for
561  *
562  * @return Backend depth-wise convolution layer function
563  */
564 template <typename DepthwiseConvolutionLayer, typename TargetInfo>
566 {
567  validate_node<TargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);
568 
569  // Extract IO and info
570  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
571  typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
572  typename TargetInfo::TensorType *biases = get_backing_tensor<TargetInfo>(node.input(2));
573  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
574 
575  const bool is_quantized = is_data_type_quantized_asymmetric(input->info()->data_type());
576 
577  if(is_quantized)
578  {
579  biases->info()->set_data_type(DataType::S32);
580  }
581 
583  const unsigned int depth_multiplier = node.depth_multiplier();
584  const ActivationLayerInfo fused_act = node.fused_activation();
585 
586  // Create and configure function (we assume that functions have been validated before creation)
587  std::unique_ptr<IFunction> func;
588  std::string func_name;
589 
590  std::tie(func, func_name) = create_named_function<DepthwiseConvolutionLayer>(
591  std::string("DepthwiseConvolutionLayer"),
592  input, weights, biases, output, conv_info, depth_multiplier, fused_act);
593 
594  // Log info
595  std::ostringstream qss;
596  if(is_quantized)
597  {
598  qss << " Input QuantInfo: " << input->info()->quantization_info()
599  << " Weights QuantInfo: " << weights->info()->quantization_info()
600  << " Output QuantInfo: " << output->info()->quantization_info();
601  }
602  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
603  << node.name()
604  << " Type: " << func_name
605  << " Target: " << TargetInfo::TargetType
606  << " Data Type: " << input->info()->data_type()
607  << " Input shape: " << input->info()->tensor_shape()
608  << " Weights shape: " << weights->info()->tensor_shape()
609  << " Output shape: " << output->info()->tensor_shape()
610  << " Depth multiplier: " << depth_multiplier
611  << qss.str()
612  << (fused_act.enabled() ? " " + to_string(fused_act.activation()) : "")
613  << std::endl);
614  return RETURN_UNIQUE_PTR(func);
615 }
616 
617 /** Create a backend dequantize layer function
618  *
619  * @tparam DequantizationLayer Function Backend dequantize function
620  * @tparam TargetInfo Target-specific information
621  *
622  * @param[in] node Node to create the backend function for
623  *
624  * @return Backend dequantize layer function
625  */
626 template <typename DequantizationLayerFunction, typename TargetInfo>
627 std::unique_ptr<IFunction> create_dequantization_layer(DequantizationLayerNode &node)
628 {
629  validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
630 
631  // Extract IO and info
632  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
633  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
634 
635  ARM_COMPUTE_ERROR_ON(input == nullptr);
636  ARM_COMPUTE_ERROR_ON(output == nullptr);
637 
638  // Create and configure function
639  auto func = support::cpp14::make_unique<DequantizationLayerFunction>();
640  func->configure(input, output);
641 
642  // Log info
643  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
644  << node.name()
645  << " Type: " << node.type()
646  << " Target: " << TargetInfo::TargetType
647  << " Data Type: " << input->info()->data_type()
648  << " Input shape: " << input->info()->tensor_shape()
649  << " Input quantization info: " << output->info()->quantization_info()
650  << " Output shape: " << output->info()->tensor_shape()
651  << std::endl);
652 
653  return RETURN_UNIQUE_PTR(func);
654 }
655 /** Create a backend detection output layer function
656  *
657  * @tparam DetectionOutputLayer Function Backend detection output function
658  * @tparam TargetInfo Target-specific information
659  *
660  * @param[in] node Node to create the backend function for
661  *
662  * @return Backend detection output layer function
663  */
664 template <typename DetectionOutputLayerFunction, typename TargetInfo>
666 {
667  validate_node<TargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);
668 
669  // Extract IO and info
670  typename TargetInfo::TensorType *input0 = get_backing_tensor<TargetInfo>(node.input(0));
671  typename TargetInfo::TensorType *input1 = get_backing_tensor<TargetInfo>(node.input(1));
672  typename TargetInfo::TensorType *input2 = get_backing_tensor<TargetInfo>(node.input(2));
673  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
674  const DetectionOutputLayerInfo detect_info = node.detection_output_info();
675 
676  ARM_COMPUTE_ERROR_ON(input0 == nullptr);
677  ARM_COMPUTE_ERROR_ON(input1 == nullptr);
678  ARM_COMPUTE_ERROR_ON(input2 == nullptr);
679  ARM_COMPUTE_ERROR_ON(output == nullptr);
680 
681  // Create and configure function
682  auto func = support::cpp14::make_unique<DetectionOutputLayerFunction>();
683  func->configure(input0, input1, input2, output, detect_info);
684 
685  // Log info
686  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
687  << node.name()
688  << " Type: " << node.type()
689  << " Target: " << TargetInfo::TargetType
690  << " Data Type: " << input0->info()->data_type()
691  << " Input0 shape: " << input0->info()->tensor_shape()
692  << " Input1 shape: " << input1->info()->tensor_shape()
693  << " Input2 shape: " << input2->info()->tensor_shape()
694  << " Output shape: " << output->info()->tensor_shape()
695  << " DetectionOutputLayer info: " << detect_info
696  << std::endl);
697 
698  return RETURN_UNIQUE_PTR(func);
699 }
700 
701 /** Create a backend detection post process layer function
702  *
703  * @tparam DetectionPostProcessLayerFunction Backend detection output function
704  * @tparam TargetInfo Target-specific information
705  *
706  * @param[in] node Node to create the backend function for
707  *
708  * @return Backend detection post process layer function
709  */
710 template <typename DetectionPostProcessLayerFunction, typename TargetInfo>
712 {
713  validate_node<TargetInfo>(node, 3 /* expected inputs */, 4 /* expected outputs */);
714 
715  // Extract IO and info
716  typename TargetInfo::TensorType *input0 = get_backing_tensor<TargetInfo>(node.input(0));
717  typename TargetInfo::TensorType *input1 = get_backing_tensor<TargetInfo>(node.input(1));
718  typename TargetInfo::TensorType *input2 = get_backing_tensor<TargetInfo>(node.input(2));
719  typename TargetInfo::TensorType *output0 = get_backing_tensor<TargetInfo>(node.output(0));
720  typename TargetInfo::TensorType *output1 = get_backing_tensor<TargetInfo>(node.output(1));
721  typename TargetInfo::TensorType *output2 = get_backing_tensor<TargetInfo>(node.output(2));
722  typename TargetInfo::TensorType *output3 = get_backing_tensor<TargetInfo>(node.output(3));
724 
725  ARM_COMPUTE_ERROR_ON(input0 == nullptr);
726  ARM_COMPUTE_ERROR_ON(input1 == nullptr);
727  ARM_COMPUTE_ERROR_ON(input2 == nullptr);
728  ARM_COMPUTE_ERROR_ON(output0 == nullptr);
729  ARM_COMPUTE_ERROR_ON(output1 == nullptr);
730  ARM_COMPUTE_ERROR_ON(output2 == nullptr);
731  ARM_COMPUTE_ERROR_ON(output3 == nullptr);
732 
733  // Create and configure function
734  auto func = support::cpp14::make_unique<DetectionPostProcessLayerFunction>();
735  func->configure(input0, input1, input2, output0, output1, output2, output3, detect_info);
736 
737  // Log info
738  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
739  << node.name()
740  << " Type: " << node.type()
741  << " Target: " << TargetInfo::TargetType
742  << " Data Type: " << input0->info()->data_type()
743  << " Input0 shape: " << input0->info()->tensor_shape()
744  << " Input1 shape: " << input1->info()->tensor_shape()
745  << " Input2 shape: " << input2->info()->tensor_shape()
746  << " Output0 shape: " << output0->info()->tensor_shape()
747  << " Output1 shape: " << output1->info()->tensor_shape()
748  << " Output2 shape: " << output2->info()->tensor_shape()
749  << " Output3 shape: " << output3->info()->tensor_shape()
750  << " DetectionPostProcessLayer info: " << detect_info
751  << std::endl);
752 
753  return RETURN_UNIQUE_PTR(func);
754 }
755 
756 /** Create a backend element-wise operation layer function
757  *
758  * @tparam EltwiseFunctions Backend element-wise function
759  * @tparam TargetInfo Target-specific information
760  *
761  * @param[in] node Node to create the backend function for
762  *
763  * @return Backend element-wise operation layer function
764  */
765 template <typename EltwiseFunctions, typename TargetInfo>
766 std::unique_ptr<IFunction> create_eltwise_layer(EltwiseLayerNode &node)
767 {
768  validate_node<TargetInfo>(node, 2 /* expected inputs */, 1 /* expected outputs */);
769 
770  // Extract IO and info
771  typename TargetInfo::TensorType *input1 = get_backing_tensor<TargetInfo>(node.input(0));
772  typename TargetInfo::TensorType *input2 = get_backing_tensor<TargetInfo>(node.input(1));
773  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
774  const EltwiseOperation eltwise_op = node.eltwise_operation();
775  const ConvertPolicy convert_policy = node.convert_policy();
776  const ActivationLayerInfo act_info = node.fused_activation();
777  ARM_COMPUTE_ERROR_ON(input1 == nullptr);
778  ARM_COMPUTE_ERROR_ON(input2 == nullptr);
779  ARM_COMPUTE_ERROR_ON(output == nullptr);
780 
781  std::unique_ptr<IFunction> func = nullptr;
782  std::string func_name;
783  if(eltwise_op == EltwiseOperation::Add)
784  {
785  std::tie(func, func_name) = create_named_function<typename EltwiseFunctions::Addition>(
786  std::string("ArithmeticAddition"),
787  input1, input2, output, convert_policy, act_info);
788  }
789  else if(eltwise_op == EltwiseOperation::Sub)
790  {
791  std::tie(func, func_name) = create_named_function<typename EltwiseFunctions::Subtraction>(
792  std::string("ArithmeticSubtraction"),
793  input1, input2, output, convert_policy, act_info);
794  }
795  else if(eltwise_op == EltwiseOperation::Mul)
796  {
797  std::tie(func, func_name) = create_named_function<typename EltwiseFunctions::Multiplication>(
798  std::string("PixelWiseMultiplication"),
799  input1, input2, output, 1.f, convert_policy, node.rounding_policy(), act_info);
800  }
801  else
802  {
803  ARM_COMPUTE_ERROR("Unsupported element-wise operation!");
804  }
805 
806  // Log info
807  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
808  << node.name()
809  << " Type: " << node.type()
810  << " Target: " << TargetInfo::TargetType
811  << " Operation: " << func_name
812  << " Data Type: " << input1->info()->data_type()
813  << " Shape: " << input1->info()->tensor_shape()
814  << std::endl);
815 
816  return RETURN_UNIQUE_PTR(func);
817 }
818 
819 /** Create a backend unary element-wise operation layer function
820  *
821  * @tparam UnaryEltwiseFunctions Backend unary element-wise function
822  * @tparam TargetInfo Target-specific information
823  *
824  * @param[in] node Node to create the backend function for
825  *
826  * @return Backend unary element-wise operation layer function
827  */
828 template <typename UnaryEltwiseFunctions, typename TargetInfo>
829 std::unique_ptr<IFunction> create_unary_eltwise_layer(UnaryEltwiseLayerNode &node)
830 {
831  validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
832 
833  // Extract IO and info
834  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
835  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
836  const UnaryEltwiseOperation eltwise_op = node.eltwise_descriptor().op;
837 
838  ARM_COMPUTE_ERROR_ON(input == nullptr);
839  ARM_COMPUTE_ERROR_ON(output == nullptr);
840 
841  std::unique_ptr<IFunction> func = nullptr;
842  std::string func_name;
843  if(eltwise_op == UnaryEltwiseOperation::Exp)
844  {
845  std::tie(func, func_name) = create_named_function<typename UnaryEltwiseFunctions::Exp>(
846  std::string("Exp"),
847  input, output);
848  }
849  else
850  {
851  ARM_COMPUTE_ERROR("Unsupported unary element-wise operation!");
852  }
853 
854  // Log info
855  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
856  << node.name()
857  << " Type: " << node.type()
858  << " Target: " << TargetInfo::TargetType
859  << " Operation: " << func_name
860  << " Data Type: " << input->info()->data_type()
861  << " Shape: " << input->info()->tensor_shape()
862  << std::endl);
863 
864  return RETURN_UNIQUE_PTR(func);
865 }
866 
867 /** Create a backend flatten layer function
868  *
869  * @tparam FlattenLayerFunction Backend flatten function
870  * @tparam TargetInfo Target-specific information
871  *
872  * @param[in] node Node to create the backend function for
873  *
874  * @return Backend flatten layer function
875  */
876 template <typename FlattenLayerFunction, typename TargetInfo>
877 std::unique_ptr<IFunction> create_flatten_layer(FlattenLayerNode &node)
878 {
879  validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
880 
881  // Extract IO and info
882  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
883  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
884 
885  ARM_COMPUTE_ERROR_ON(input == nullptr);
886  ARM_COMPUTE_ERROR_ON(output == nullptr);
887 
888  // Create and configure function
889  auto func = support::cpp14::make_unique<FlattenLayerFunction>();
890  func->configure(input, output);
891 
892  // Log info
893  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
894  << node.name()
895  << " Type: " << node.type()
896  << " Target: " << TargetInfo::TargetType
897  << " Data Type: " << input->info()->data_type()
898  << " Input shape: " << input->info()->tensor_shape()
899  << " Output shape: " << output->info()->tensor_shape()
900  << std::endl);
901 
902  return RETURN_UNIQUE_PTR(func);
903 }
904 
905 /** Create a backend fully connected layer function
906  *
907  * @tparam FullyConnectedLayerFunction Backend fully-connected function
908  * @tparam TargetInfo Target-specific information
909  *
910  * @param[in] node Node to create the backend function for
911  * @param[in] ctx Graph context
912  *
913  * @return Backend fully connected layer function
914  */
915 template <typename FullyConnectedLayerFunction, typename TargetInfo>
917 {
918  validate_node<TargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);
919 
920  // Extract IO and info
921  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
922  typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
923  typename TargetInfo::TensorType *biases = get_backing_tensor<TargetInfo>(node.input(2));
924  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
925  const FullyConnectedLayerInfo fc_info = node.info();
926 
927  ARM_COMPUTE_ERROR_ON(input == nullptr);
928  ARM_COMPUTE_ERROR_ON(weights == nullptr);
929  ARM_COMPUTE_ERROR_ON(output == nullptr);
930 
931  // Create and configure function
932  auto wm = get_weights_manager(ctx, TargetInfo::TargetType);
933  auto mm = get_memory_manager(ctx, TargetInfo::TargetType);
934  auto func = support::cpp14::make_unique<FullyConnectedLayerFunction>(mm, wm.get());
935  func->configure(input, weights, biases, output, fc_info);
936 
937  const bool is_quantized = is_data_type_quantized_asymmetric(input->info()->data_type());
938 
939  // Log info
940  std::ostringstream qss;
941  if(is_quantized)
942  {
943  qss << " Input QuantInfo: " << input->info()->quantization_info()
944  << " Weights QuantInfo: " << weights->info()->quantization_info()
945  << " Output QuantInfo: " << output->info()->quantization_info();
946  }
947  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
948  << node.name()
949  << " Type: " << node.type()
950  << " Target: " << TargetInfo::TargetType
951  << " Data Type: " << input->info()->data_type()
952  << qss.str()
953  << " Input shape: " << input->info()->tensor_shape()
954  << " Weights shape: " << weights->info()->tensor_shape()
955  << " Output shape: " << output->info()->tensor_shape()
956  << std::endl);
957 
958  return RETURN_UNIQUE_PTR(func);
959 }
960 
961 /** Create a backend generate proposals layer function
962  *
963  * @tparam GenerateProposalsLayerFunction Backend generate proposals function
964  * @tparam TargetInfo Target-specific information
965  *
966  * @param[in] node Node to create the backend function for
967  * @param[in] ctx Graph context
968  *
969  * @return Backend generate proposals layer function
970  */
971 template <typename GenerateProposalsLayerFunction, typename TargetInfo>
973 {
974  validate_node<TargetInfo>(node, 3 /* expected inputs */, 3 /* expected outputs */);
975 
976  // Extract IO and info
977  typename TargetInfo::TensorType *scores = get_backing_tensor<TargetInfo>(node.input(0));
978  typename TargetInfo::TensorType *deltas = get_backing_tensor<TargetInfo>(node.input(1));
979  typename TargetInfo::TensorType *anchors = get_backing_tensor<TargetInfo>(node.input(2));
980  typename TargetInfo::TensorType *proposals = get_backing_tensor<TargetInfo>(node.output(0));
981  typename TargetInfo::TensorType *scores_out = get_backing_tensor<TargetInfo>(node.output(1));
982  typename TargetInfo::TensorType *num_valid_proposals = get_backing_tensor<TargetInfo>(node.output(2));
983  const GenerateProposalsInfo info = node.info();
984 
985  ARM_COMPUTE_ERROR_ON(scores == nullptr);
986  ARM_COMPUTE_ERROR_ON(deltas == nullptr);
987  ARM_COMPUTE_ERROR_ON(anchors == nullptr);
988  ARM_COMPUTE_ERROR_ON(proposals == nullptr);
989  ARM_COMPUTE_ERROR_ON(scores_out == nullptr);
990 
991  // Create and configure function
992  auto func = support::cpp14::make_unique<GenerateProposalsLayerFunction>(get_memory_manager(ctx, TargetInfo::TargetType));
993  func->configure(scores, deltas, anchors, proposals, scores_out, num_valid_proposals, info);
994 
995  // Log info
996  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.type()
997  << " Target " << TargetInfo::TargetType
998  << " Data Type: " << scores->info()->data_type()
999  << " Scores shape: " << scores->info()->tensor_shape()
1000  << " Deltas shape: " << deltas->info()->tensor_shape()
1001  << " Anchors shape: " << anchors->info()->tensor_shape()
1002  << " Proposals shape: " << proposals->info()->tensor_shape()
1003  << " Num valid proposals shape: " << num_valid_proposals->info()->tensor_shape()
1004  << " Scores Out shape: " << scores_out->info()->tensor_shape()
1005  << std::endl);
1006 
1007  return RETURN_UNIQUE_PTR(func);
1008 }
1009 
1010 /** Create a backend normalization layer function
1011  *
1012  * @tparam NormalizationLayerFunction Backend normalization function
1013  * @tparam TargetInfo Target-specific information
1014  *
1015  * @param[in] node Node to create the backend function for
1016  * @param[in] ctx Graph context
1017  *
1018  * @return Backend normalization layer function
1019  */
1020 template <typename NormalizationLayerFunction, typename TargetInfo>
1021 std::unique_ptr<IFunction> create_normalization_layer(NormalizationLayerNode &node, GraphContext &ctx)
1022 {
1023  ARM_COMPUTE_UNUSED(ctx);
1024 
1025  validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1026 
1027  // Extract IO and info
1028  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1029  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1030  const NormalizationLayerInfo norm_info = node.normalization_info();
1031  ARM_COMPUTE_ERROR_ON(input == nullptr);
1032  ARM_COMPUTE_ERROR_ON(output == nullptr);
1033 
1034  // Create and configure function
1035  auto func = support::cpp14::make_unique<NormalizationLayerFunction>();
1036  func->configure(input, output, norm_info);
1037 
1038  // Log info
1039  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1040  << node.name()
1041  << " Type: " << node.type()
1042  << " Target: " << TargetInfo::TargetType
1043  << " Data Type: " << input->info()->data_type()
1044  << " Input shape: " << input->info()->tensor_shape()
1045  << " Output shape: " << output->info()->tensor_shape()
1046  << " Normalization info: " << norm_info.type()
1047  << std::endl);
1048 
1049  return RETURN_UNIQUE_PTR(func);
1050 }
1051 
1052 /** Create a backend normalize planar YUV layer function
1053  *
1054  * @tparam NormalizePlanarYUVLayerFunction Backend normalize planar YUV function
1055  * @tparam TargetInfo Target-specific information
1056  *
1057  * @param[in] node Node to create the backend function for
1058  *
1059  * @return Backend normalize planar YUV layer function
1060  */
1061 template <typename NormalizePlanarYUVLayerFunction, typename TargetInfo>
1063 {
1064  validate_node<TargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);
1065 
1066  // Extract IO and info
1067  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1068  typename TargetInfo::TensorType *mean = get_backing_tensor<TargetInfo>(node.input(1));
1069  typename TargetInfo::TensorType *std = get_backing_tensor<TargetInfo>(node.input(2));
1070  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1071  ARM_COMPUTE_ERROR_ON(input == nullptr);
1072  ARM_COMPUTE_ERROR_ON(mean == nullptr);
1073  ARM_COMPUTE_ERROR_ON(std == nullptr);
1074  ARM_COMPUTE_ERROR_ON(output == nullptr);
1075 
1076  // Create and configure function
1077  auto func = support::cpp14::make_unique<NormalizePlanarYUVLayerFunction>();
1078  func->configure(input, output, mean, std);
1079 
1080  // Log info
1081  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1082  << node.name()
1083  << " Type: " << node.type()
1084  << " Target: " << TargetInfo::TargetType
1085  << " Data Type: " << input->info()->data_type()
1086  << " Shape: " << input->info()->tensor_shape()
1087  << std::endl);
1088 
1089  return RETURN_UNIQUE_PTR(func);
1090 }
1091 
1092 /** Create a backend pad layer function
1093  *
1094  * @tparam PadLayerFunction Backend pad function
1095  * @tparam TargetInfo Target-specific information
1096  *
1097  * @param[in] node Node to create the backend function for
1098  *
1099  * @return Backend pad layer function
1100  */
1101 template <typename PadLayerFunction, typename TargetInfo>
1102 std::unique_ptr<IFunction> create_pad_layer(PadLayerNode &node)
1103 {
1104  validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1105 
1106  // Extract IO and info
1107  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1108  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1109  const PaddingList &padding = node.padding();
1110  const PixelValue pad_value = node.pad_value();
1111  ARM_COMPUTE_ERROR_ON(input == nullptr);
1112  ARM_COMPUTE_ERROR_ON(output == nullptr);
1113 
1114  // Create and configure function
1115  auto func = support::cpp14::make_unique<PadLayerFunction>();
1116  func->configure(input, output, padding, pad_value);
1117 
1118  // Log info
1119  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1120  << node.name()
1121  << " Type: " << node.type()
1122  << " Target: " << TargetInfo::TargetType
1123  << " Data Type: " << input->info()->data_type()
1124  << " Input shape: " << input->info()->tensor_shape()
1125  << " Output shape: " << output->info()->tensor_shape()
1126  << std::endl);
1127 
1128  return RETURN_UNIQUE_PTR(func);
1129 }
1130 
1131 /** Create a backend permute layer function
1132  *
1133  * @tparam PermuteLayerFunction Backend permute function
1134  * @tparam TargetInfo Target-specific information
1135  *
1136  * @param[in] node Node to create the backend function for
1137  *
1138  * @return Backend permute layer function
1139  */
1140 template <typename PermuteLayerFunction, typename TargetInfo>
1141 std::unique_ptr<IFunction> create_permute_layer(PermuteLayerNode &node)
1142 {
1143  validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1144 
1145  // Extract IO and info
1146  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1147  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1148  const PermutationVector &perm = node.permutation_vector();
1149  ARM_COMPUTE_ERROR_ON(input == nullptr);
1150  ARM_COMPUTE_ERROR_ON(output == nullptr);
1151 
1152  // Create and configure function
1153  auto func = support::cpp14::make_unique<PermuteLayerFunction>();
1154  func->configure(input, output, perm);
1155 
1156  // Log info
1157  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1158  << node.name()
1159  << " Type: " << node.type()
1160  << " Target: " << TargetInfo::TargetType
1161  << " Data Type: " << input->info()->data_type()
1162  << " Input shape: " << input->info()->tensor_shape()
1163  << " Output shape: " << output->info()->tensor_shape()
1164  << " Permutation vector: " << perm
1165  << std::endl);
1166 
1167  return RETURN_UNIQUE_PTR(func);
1168 }
1169 
1170 /** Create a backend pooling layer function
1171  *
1172  * @tparam PoolingLayerFunction Backend pooling function
1173  * @tparam TargetInfo Target-specific information
1174  *
1175  * @param[in] node Node to create the backend function for
1176  *
1177  * @return Backend pooling layer function
1178  */
1179 template <typename PoolingLayerFunction, typename TargetInfo>
1180 std::unique_ptr<IFunction> create_pooling_layer(PoolingLayerNode &node)
1181 {
1182  validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1183 
1184  // Extract IO and info
1185  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1186  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1187  const PoolingLayerInfo pool_info = node.pooling_info();
1188  ARM_COMPUTE_ERROR_ON(input == nullptr);
1189  ARM_COMPUTE_ERROR_ON(output == nullptr);
1190 
1191  // Create and configure function
1192  auto func = support::cpp14::make_unique<PoolingLayerFunction>();
1193  func->configure(input, output, pool_info);
1194 
1195  // Log info
1196  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1197  << node.name()
1198  << " Type: " << node.type()
1199  << " Target: " << TargetInfo::TargetType
1200  << " Data Type: " << input->info()->data_type()
1201  << " Input shape: " << input->info()->tensor_shape()
1202  << " Output shape: " << output->info()->tensor_shape()
1203  << " Pooling info: " << pool_info.pool_type
1204  << std::endl);
1205 
1206  return RETURN_UNIQUE_PTR(func);
1207 }
1208 
1209 /** Create a backend PRelu layer function
1210  *
1211  * @tparam PReluFunction Backend PRelu function
1212  * @tparam TargetInfo Target-specific information
1213  *
1214  * @param[in] node Node to create the backend function for
1215  *
1216  * @return Backend PRelu layer function
1217  */
1218 template <typename PReluFunction, typename TargetInfo>
1219 std::unique_ptr<IFunction> create_prelu_layer(PReluLayerNode &node)
1220 {
1221  validate_node<TargetInfo>(node, 2 /* expected inputs */, 1 /* expected outputs */);
1222 
1223  // Extract IO and info
1224  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1225  typename TargetInfo::TensorType *alpha = get_backing_tensor<TargetInfo>(node.input(1));
1226  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1227  ARM_COMPUTE_ERROR_ON(input == nullptr || alpha == nullptr);
1228  ARM_COMPUTE_ERROR_ON(output == nullptr);
1229 
1230  // Create and configure function
1231  auto func = support::cpp14::make_unique<PReluFunction>();
1232  func->configure(input, alpha, output);
1233 
1234  // Log info
1235  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1236  << node.name()
1237  << " Type: " << node.type()
1238  << " Target: " << TargetInfo::TargetType
1239  << " Data Type: " << input->info()->data_type()
1240  << " Input shape: " << input->info()->tensor_shape()
1241  << " Output shape: " << output->info()->tensor_shape()
1242  << std::endl);
1243 
1244  return RETURN_UNIQUE_PTR(func);
1245 }
1246 
1247 /** Create a backend print layer function
1248  *
1249  * @tparam TargetInfo Target-specific information
1250  *
1251  * @param[in] node Node to create the backend function for
1252  *
1253  * @return Backend print layer function
1254  */
1255 template <typename TargetInfo>
1256 std::unique_ptr<IFunction> create_print_layer(PrintLayerNode &node)
1257 {
1258  validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1259 
1260  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1261  ARM_COMPUTE_ERROR_ON(input == nullptr);
1263 
1264  // Log info
1265  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1266  << node.name()
1267  << " Type: " << node.type()
1268  << " Target: " << TargetInfo::TargetType
1269  << " Data Type: " << input->info()->data_type()
1270  << " Input shape: " << input->info()->tensor_shape()
1271  << std::endl);
1272 
1273  return nullptr;
1274 }
1275 
1276 /** Create a backend priorbox layer function
1277  *
1278  * @tparam PriorBoxLayerFunction Backend priorbox function
1279  * @tparam TargetInfo Target-specific information
1280  *
1281  * @param[in] node Node to create the backend function for
1282  *
1283  * @return Backend priorbox layer function
1284  */
1285 template <typename PriorBoxLayerFunction, typename TargetInfo>
1286 std::unique_ptr<IFunction> create_priorbox_layer(PriorBoxLayerNode &node)
1287 {
1288  validate_node<TargetInfo>(node, 2 /* expected inputs */, 1 /* expected outputs */);
1289 
1290  // Extract IO and info
1291  typename TargetInfo::TensorType *input0 = get_backing_tensor<TargetInfo>(node.input(0));
1292  typename TargetInfo::TensorType *input1 = get_backing_tensor<TargetInfo>(node.input(1));
1293  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1294  const PriorBoxLayerInfo prior_info = node.priorbox_info();
1295  ARM_COMPUTE_ERROR_ON(input0 == nullptr);
1296  ARM_COMPUTE_ERROR_ON(input1 == nullptr);
1297  ARM_COMPUTE_ERROR_ON(output == nullptr);
1298 
1299  // Create and configure function
1300  auto func = support::cpp14::make_unique<PriorBoxLayerFunction>();
1301  func->configure(input0, input1, output, prior_info);
1302 
1303  // Log info
1304  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1305  << node.name()
1306  << " Type: " << node.type()
1307  << " Target: " << TargetInfo::TargetType
1308  << " Data Type: " << input0->info()->data_type()
1309  << " Input0 shape: " << input0->info()->tensor_shape()
1310  << " Input1 shape: " << input1->info()->tensor_shape()
1311  << " Output shape: " << output->info()->tensor_shape()
1312  << " PriorBoxLayer info: " << prior_info
1313  << std::endl);
1314 
1315  return RETURN_UNIQUE_PTR(func);
1316 }
1317 
1318 /** Create a backend quantization layer function
1319  *
1320  * @tparam QuantizationLayerFunction Backend quantization function
1321  * @tparam TargetInfo Target-specific information
1322  *
1323  * @param[in] node Node to create the backend function for
1324  *
1325  * @return Backend quantization layer function
1326  */
1327 template <typename QuantizationLayerFunction, typename TargetInfo>
1328 std::unique_ptr<IFunction> create_quantization_layer(QuantizationLayerNode &node)
1329 {
1330  validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1331 
1332  // Extract IO and info
1333  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1334  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1335  ARM_COMPUTE_ERROR_ON(input == nullptr);
1336  ARM_COMPUTE_ERROR_ON(output == nullptr);
1337 
1338  // Create and configure function
1339  auto func = support::cpp14::make_unique<QuantizationLayerFunction>();
1340  func->configure(input, output);
1341 
1342  // Log info
1343  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1344  << node.name()
1345  << " Type: " << node.type()
1346  << " Target: " << TargetInfo::TargetType
1347  << " Data Type: " << input->info()->data_type()
1348  << " Input shape: " << input->info()->tensor_shape()
1349  << " Output shape: " << output->info()->tensor_shape()
1350  << std::endl);
1351 
1352  return RETURN_UNIQUE_PTR(func);
1353 }
1354 
1355 /** Create a backend reorg layer function
1356  *
1357  * @tparam ReorgLayerFunction Backend reorg function
1358  * @tparam TargetInfo Target-specific information
1359  *
1360  * @param[in] node Node to create the backend function for
1361  *
1362  * @return Backend reorg layer function
1363  */
1364 template <typename ReorgLayerFunction, typename TargetInfo>
1365 std::unique_ptr<IFunction> create_reorg_layer(ReorgLayerNode &node)
1366 {
1367  validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1368 
1369  // Extract IO and info
1370  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1371  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1372  ARM_COMPUTE_ERROR_ON(input == nullptr);
1373  ARM_COMPUTE_ERROR_ON(output == nullptr);
1374 
1375  // Create and configure function
1376  auto func = support::cpp14::make_unique<ReorgLayerFunction>();
1377  func->configure(input, output, node.stride());
1378 
1379  // Log info
1380  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1381  << node.name()
1382  << " Type: " << node.type()
1383  << " Target: " << TargetInfo::TargetType
1384  << " Data Type: " << input->info()->data_type()
1385  << " Input shape: " << input->info()->tensor_shape()
1386  << " Output shape: " << output->info()->tensor_shape()
1387  << std::endl);
1388 
1389  return RETURN_UNIQUE_PTR(func);
1390 }
1391 
1392 /** Create a backend reshape layer function
1393  *
1394  * @tparam ReshapeLayerFunction Backend reshape function
1395  * @tparam TargetInfo Target-specific information
1396  *
1397  * @param[in] node Node to create the backend function for
1398  *
1399  * @return Backend reshape layer function
1400  */
1401 template <typename ReshapeLayerFunction, typename TargetInfo>
1402 std::unique_ptr<IFunction> create_reshape_layer(ReshapeLayerNode &node)
1403 {
1404  validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1405 
1406  // Extract IO and info
1407  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1408  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1409  ARM_COMPUTE_ERROR_ON(input == nullptr);
1410  ARM_COMPUTE_ERROR_ON(output == nullptr);
1411 
1412  // Create and configure function
1413  auto func = support::cpp14::make_unique<ReshapeLayerFunction>();
1414  func->configure(input, output);
1415 
1416  // Log info
1417  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1418  << node.name()
1419  << " Type: " << node.type()
1420  << " Target: " << TargetInfo::TargetType
1421  << " Data Type: " << input->info()->data_type()
1422  << " Input shape: " << input->info()->tensor_shape()
1423  << " Output shape: " << output->info()->tensor_shape()
1424  << std::endl);
1425 
1426  return RETURN_UNIQUE_PTR(func);
1427 }
1428 
1429 /** Create a backend resize layer function
1430  *
1431  * @tparam ResizeLayerFunction Backend resize function
1432  * @tparam TargetInfo Target-specific information
1433  *
1434  * @param[in] node Node to create the backend function for
1435  *
1436  * @return Backend resize layer function
1437  */
1438 template <typename ResizeLayerFunction, typename TargetInfo>
1439 std::unique_ptr<IFunction> create_resize_layer(ResizeLayerNode &node)
1440 {
1441  validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1442 
1443  // Extract IO and info
1444  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1445  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1446  ARM_COMPUTE_ERROR_ON(input == nullptr);
1447  ARM_COMPUTE_ERROR_ON(output == nullptr);
1448  const InterpolationPolicy policy = node.policy();
1449 
1450  // Create and configure function
1451  auto func = support::cpp14::make_unique<ResizeLayerFunction>();
1452  func->configure(input, output, ScaleKernelInfo{ policy, BorderMode::CONSTANT });
1453 
1454  // Log info
1455  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1456  << node.name()
1457  << " Type: " << node.type()
1458  << " Target: " << TargetInfo::TargetType
1459  << " Data Type: " << input->info()->data_type()
1460  << " Input shape: " << input->info()->tensor_shape()
1461  << " Output shape: " << output->info()->tensor_shape()
1462  << " Interpolation: " << policy
1463  << std::endl);
1464 
1465  return RETURN_UNIQUE_PTR(func);
1466 }
1467 
1468 /** Create a backend ROI align layer function
1469  *
1470  * @tparam ROIAlignLayerFunction ROI Align function
1471  * @tparam TargetInfo Target-specific information
1472  *
1473  * @param[in] node Node to create the backend function for
1474  *
1475  * @return ROI Align layer function
1476  */
1477 template <typename ROIAlignLayerFunction, typename TargetInfo>
1478 std::unique_ptr<IFunction> create_roi_align_layer(ROIAlignLayerNode &node)
1479 {
1480  validate_node<TargetInfo>(node, 2 /* expected inputs */, 1 /* expected outputs */);
1481 
1482  // Extract IO and info
1483  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1484  typename TargetInfo::TensorType *rois = get_backing_tensor<TargetInfo>(node.input(1));
1485  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1486  ARM_COMPUTE_ERROR_ON(input == nullptr);
1487  ARM_COMPUTE_ERROR_ON(output == nullptr);
1488  ARM_COMPUTE_ERROR_ON(rois == nullptr);
1489 
1490  const ROIPoolingLayerInfo pool_info = node.pooling_info();
1491 
1492  // Create and configure function
1493  auto func = support::cpp14::make_unique<ROIAlignLayerFunction>();
1494 
1495  func->configure(input, rois, output, pool_info);
1496 
1497  // Log info
1498  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1499  << node.name()
1500  << " Type: " << node.type()
1501  << " Target: " << TargetInfo::TargetType
1502  << " Data Type: " << input->info()->data_type()
1503  << " Input shape: " << input->info()->tensor_shape()
1504  << " Output shape: " << output->info()->tensor_shape()
1505  << " ROIs shape: " << rois->info()->tensor_shape()
1506  << " ROIPooling width: " << pool_info.pooled_width()
1507  << " ROIPooling height: " << pool_info.pooled_height()
1508  << std::endl);
1509 
1510  return RETURN_UNIQUE_PTR(func);
1511 }
1512 
1513 /** Create a backend slice layer function
1514  *
1515  * @tparam SliceLayerFunction Backend slice function
1516  * @tparam TargetInfo Target-specific information
1517  *
1518  * @param[in] node Node to create the backend function for
1519  *
1520  * @return Backend slice layer function
1521  */
1522 template <typename SliceLayerFunction, typename TargetInfo>
1523 std::unique_ptr<IFunction> create_slice_layer(SliceLayerNode &node)
1524 {
1525  validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1526 
1527  // Extract IO and info
1528  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1529  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1530  ARM_COMPUTE_ERROR_ON(input == nullptr);
1531  ARM_COMPUTE_ERROR_ON(output == nullptr);
1532 
1533  // Create and configure function
1534  auto func = support::cpp14::make_unique<SliceLayerFunction>();
1535  func->configure(input, output, node.starts(), node.ends());
1536 
1537  // Log info
1538  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1539  << node.name()
1540  << " Type: " << node.type()
1541  << " Target: " << TargetInfo::TargetType
1542  << " Data Type: " << input->info()->data_type()
1543  << " Input shape: " << input->info()->tensor_shape()
1544  << " Output shape: " << output->info()->tensor_shape()
1545  << std::endl);
1546 
1547  return RETURN_UNIQUE_PTR(func);
1548 }
1549 
1550 /** Create a backend softmax layer function
1551  *
1552  * @tparam SoftmaxLayerFunction Backend softmax function
1553  * @tparam TargetInfo Target-specific information
1554  *
1555  * @param[in] node Node to create the backend function for
1556  * @param[in] ctx Graph context
1557  *
1558  * @return Backend softmax layer function
1559  */
1560 template <typename SoftmaxLayerFunction, typename TargetInfo>
1561 std::unique_ptr<IFunction> create_softmax_layer(SoftmaxLayerNode &node, GraphContext &ctx)
1562 {
1563  validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1564 
1565  // Extract IO and info
1566  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1567  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1568  const float beta = node.beta();
1569  ARM_COMPUTE_ERROR_ON(input == nullptr);
1570  ARM_COMPUTE_ERROR_ON(output == nullptr);
1571 
1572  // Create and configure function
1573  auto func = support::cpp14::make_unique<SoftmaxLayerFunction>(get_memory_manager(ctx, TargetInfo::TargetType));
1574  func->configure(input, output, beta);
1575 
1576  // Log info
1577  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1578  << node.name()
1579  << " Type: " << node.type()
1580  << " Target: " << TargetInfo::TargetType
1581  << " Data Type: " << input->info()->data_type()
1582  << " Input shape: " << input->info()->tensor_shape()
1583  << " Output shape: " << output->info()->tensor_shape()
1584  << std::endl);
1585 
1586  return RETURN_UNIQUE_PTR(func);
1587 }
1588 
1589 /** Create a backend layer stack function
1590  *
1591  * @tparam StackLayerFunction Backend stack function
1592  * @tparam TargetInfo Target-specific information
1593  *
1594  * @param[in] node Node to create the backend function for
1595  *
1596  * @return Backend stack layer function
1597  */
1598 template <typename StackLayerFunction, typename TargetInfo>
1599 std::unique_ptr<arm_compute::IFunction> create_stack_layer(StackLayerNode &node)
1600 {
1601  ARM_COMPUTE_LOG_GRAPH_VERBOSE("Creating Stack node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
1602  ARM_COMPUTE_ERROR_ON(node.num_outputs() != 1);
1603 
1604  // Extract IO and info
1605  std::vector<typename TargetInfo::TensorType *> inputs;
1606  for(unsigned int i = 0; i < node.num_inputs(); ++i)
1607  {
1608  inputs.push_back(get_backing_tensor<TargetInfo>(node.input(i)));
1609  }
1610  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1611  const int axis = node.axis();
1612 
1613  // Create and configure function
1614  auto func = support::cpp14::make_unique<StackLayerFunction>();
1615  func->configure(inputs, axis, output);
1616 
1617  // Log info
1618  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1619  << node.name()
1620  << " Type: " << node.type()
1621  << " Target: " << TargetInfo::TargetType
1622  << " Data Type: " << output->info()->data_type()
1623  << " Inputs shape: " << inputs[0]->info()->tensor_shape()
1624  << " Output shape: " << output->info()->tensor_shape()
1625  << " Num Inputs: " << inputs.size()
1626  << " Axis: " << axis
1627  << std::endl);
1628 
1629  return RETURN_UNIQUE_PTR(func);
1630 }
/** Create a backend Upsample layer function
 *
 * @tparam UpsampleLayerFunction Backend Upsample function
 * @tparam TargetInfo            Target-specific information
 *
 * @param[in] node Node to create the backend function for
 * @param[in] ctx  Graph context (currently unused by this creator)
 *
 * @return Backend Upsample layer function
 */
template <typename UpsampleLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_upsample_layer(UpsampleLayerNode &node, GraphContext &ctx)
{
    ARM_COMPUTE_UNUSED(ctx);
    // An upsample node has exactly one input and one output tensor
    validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input             = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *output            = get_backing_tensor<TargetInfo>(node.output(0));
    const Size2D              info              = node.info();
    const InterpolationPolicy upsampling_policy = node.upsampling_policy();
    // Only a fixed 2x2 upsampling stride is supported by the backend functions
    // NOTE(review): the policy value itself is forwarded unchecked to configure() —
    // confirm whether the backend restricts it (e.g. to NEAREST_NEIGHBOR)
    ARM_COMPUTE_ERROR_ON(info.x() != 2 || info.y() != 2);
    ARM_COMPUTE_ERROR_ON(input == nullptr);
    ARM_COMPUTE_ERROR_ON(output == nullptr);

    // Create and configure function
    auto func = support::cpp14::make_unique<UpsampleLayerFunction>();
    func->configure(input, output, info, upsampling_policy);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << " Strides: " << info
                               << " Upsampling policy: " << upsampling_policy
                               << std::endl);

    return RETURN_UNIQUE_PTR(func);
}
1675 /** Create a backend YOLO layer function
1676  *
1677  * @tparam YoloLayerFunction Backend YOLO function
1678  * @tparam TargetInfo Target-specific information
1679  *
1680  * @param[in] node Node to create the backend function for
1681  * @param[in] ctx Graph context
1682  *
1683  * @return Backend YOLO layer function
1684  */
1685 template <typename YOLOlayerFunction, typename TargetInfo>
1686 std::unique_ptr<IFunction> create_yolo_layer(YOLOLayerNode &node, GraphContext &ctx)
1687 {
1688  ARM_COMPUTE_UNUSED(ctx);
1689  validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1690 
1691  // Extract IO and info
1692  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1693  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1694  const ActivationLayerInfo act_info = node.activation_info();
1695  const int32_t num_classes = node.num_classes();
1696  ARM_COMPUTE_ERROR_ON(num_classes <= 0);
1697  ARM_COMPUTE_ERROR_ON(input == nullptr);
1698  ARM_COMPUTE_ERROR_ON(output == nullptr);
1699 
1700  // Create and configure function
1701  auto func = support::cpp14::make_unique<YOLOlayerFunction>();
1702  func->configure(input, output, act_info, num_classes);
1703 
1704  // Log info
1705  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1706  << node.name()
1707  << " Type: " << node.type()
1708  << " Target: " << TargetInfo::TargetType
1709  << " Data Type: " << input->info()->data_type()
1710  << " Input shape: " << input->info()->tensor_shape()
1711  << " Output shape: " << output->info()->tensor_shape()
1712  << " Activation function: " << act_info.activation()
1713  << " Num classes: " << num_classes
1714  << std::endl);
1715 
1716  return RETURN_UNIQUE_PTR(func);
1717 }
1718 } // namespace detail
1719 } // namespace backends
1720 } // namespace graph
1721 } // namespace arm_compute
1722 
1723 #endif /* ARM_COMPUTE_GRAPH_BACKENDS_DETAIL_FUNCTION_HELPERS_H */
NodeType type() const override
Returns node's type.
std::unique_ptr< IFunction > create_normalization_layer(NormalizationLayerNode &node, GraphContext &ctx)
Create a backend normalization layer function.
std::unique_ptr< IFunction > create_dequantization_layer(DequantizationLayerNode &node)
Create a backend dequantize layer function.
std::string name() const
Returns node's name.
Definition: INode.cpp:107
NodeType type() const override
Returns node's type.
Class describing the value of a pixel for any image format.
Definition: PixelValue.h:34
NodeType type() const override
Returns node's type.
InterpolationPolicy
Interpolation method.
Definition: Types.h:369
NodeType type() const override
Returns node's type.
Generate Proposals Information class.
Definition: Types.h:1319
const DataLayout data_layout
Definition: Im2Col.cpp:146
EltwiseOperation
Supported Element-wise operations.
Definition: Types.h:102
NodeType type() const override
Returns node's type.
NodeType type() const override
Returns node's type.
std::unique_ptr< IFunction > create_slice_layer(SliceLayerNode &node)
Create a backend slice layer function.
int stride() const
Stride value to use for reorganizing the values in the output tensor.
NodeType type() const override
Returns node's type.
RoundingPolicy rounding_policy() const
Rounding policy accessor.
const ROIPoolingLayerInfo & pooling_info() const
ROIPoolingLayerInfo accessor.
std::unique_ptr< IFunction > create_eltwise_layer(EltwiseLayerNode &node)
Create a backend element-wise operation layer function.
bool enabled() const
Check if initialised.
Definition: Types.h:1567
ActivationLayerInfo fused_activation() const
Returns fused activation.
std::vector< PaddingInfo > PaddingList
List of padding information.
Definition: Types.h:458
DataLayoutDimension concatenation_axis() const
Concatenation axis parameter accessor.
ITensorHandle * handle()
Backend tensor handle accessor.
Definition: Tensor.cpp:55
std::unique_ptr< IFunction > create_batch_normalization_layer(BatchNormalizationLayerNode &node)
Create a backend batch normalization layer function.
NodeType type() const override
Returns node's type.
FastMathHint fast_math_hint() const
Fast math hint accessor.
#define ARM_COMPUTE_ERROR(msg)
Print the given message then throw an std::runtime_error.
Definition: Error.h:352
std::unique_ptr< IFunction > create_fused_convolution_batch_normalization_layer(FusedConvolutionBatchNormalizationNode &node, GraphContext &ctx)
Create a backend batch normalization layer function.
NodeType type() const override
Returns node's type.
Target assigned_target() const
Returns assigned target for this node.
Definition: INode.cpp:198
TensorType
Memory type.
Definition: Types.h:38
std::unique_ptr< IFunction > create_prelu_layer(PReluLayerNode &node)
Create a backend PRelu layer function.
unsigned int num_groups() const
Number of groups accessor.
std::unique_ptr< IFunction > create_depthwise_convolution_layer(DepthwiseConvolutionLayerNode &node)
Create a backend layer depth-wise convolution function.
size_t num_outputs() const
Returns number of outputs of the node.
Definition: INode.cpp:183
FullyConnectedLayerInfo info() const
Fully connected layer addition information.
NodeType type() const override
Returns node's type.
PixelValue pad_value() const
Padding value accessor.
std::unique_ptr< IFunction > create_resize_layer(ResizeLayerNode &node)
Create a backend resize layer function.
Wrapper function to first apply {NE, CL}BatchNormalizationLayer on the weights and then run {NE,...
Normalization Layer Information class.
Definition: Types.h:1614
std::unique_ptr< IFunction > create_permute_layer(PermuteLayerNode &node)
Create a backend permute layer function.
ActivationLayerInfo fused_activation() const
Returns fused activation.
PadStrideInfo deconvolution_info() const
Deconvolution metadata accessor.
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
Definition: Error.h:466
std::unique_ptr< IFunction > create_generate_proposals_layer(GenerateProposalsLayerNode &node, GraphContext &ctx)
Create a backend generate proposals layer function.
NodeType type() const override
Returns node's type.
Fully connected layer info.
Definition: Types.h:1580
const PermutationVector & permutation_vector() const
Permutation vector accessor.
Fast math enabled for Convolution layer.
NodeType type() const override
Returns node's type.
std::unique_ptr< IFunction > create_flatten_layer(FlattenLayerNode &node)
Create a backend flatten layer function.
NodeType type() const override
Returns node's type.
unsigned int pooled_width() const
Get the pooled width of the layer.
Definition: Types.h:1291
Wrapper function to first apply {NE, CL}BatchNormalizationLayer on the weights and then run {NE,...
ActivationLayerInfo fused_activation() const
Returns fused activation.
std::unique_ptr< IFunction > create_pad_layer(PadLayerNode &node)
Create a backend pad layer function.
Output values are defined to match the source pixel whose center is nearest to the sample position.
EltwiseOperation eltwise_operation() const
Eltwise operation accessor.
Activation Layer Information class.
Definition: Types.h:1517
float epsilon() const
Epsilon parameter accessor.
unsigned int num_groups() const
Number of groups in convolution accessor.
#define ARM_COMPUTE_LOG_GRAPH_INFO(x)
Definition: Logger.h:54
std::unique_ptr< arm_compute::IFunction > create_stack_layer(StackLayerNode &node)
Create a backend layer stack function.
Copyright (c) 2017-2020 Arm Limited.
NodeType type() const override
Returns node's type.
NodeType type() const override
Returns node's type.
PoolingLayerInfo pooling_info() const
Pooling metadata accessor.
NormalizationLayerInfo normalization_info() const
Normalization info accessor.
ITensorInfo * info() const override
Interface to be implemented by the child class to return the tensor's metadata.
Definition: Tensor.cpp:33
descriptors::UnaryEltwiseLayerDescriptor eltwise_descriptor() const
Unary eltwise layer descriptor.
Convolution Layer Weights Information class.
Definition: Types.h:1694
std::unique_ptr< IFunction > create_softmax_layer(SoftmaxLayerNode &node, GraphContext &ctx)
Create a backend softmax layer function.
NodeType type() const override
Returns node's type.
1 channel, 1 S32 per channel
TensorDescriptor & desc()
TensorInfo metadata accessor.
Definition: Tensor.cpp:40
unsigned int num_groups() const
Number of groups in convolution accessor.
std::unique_ptr< IFunction > create_fused_depthwise_convolution_batch_normalization_layer(FusedDepthwiseConvolutionBatchNormalizationNode &node, GraphContext &ctx)
Create a backend fused depthwise convolution batch normalization layer function.
Node interface.
Definition: INode.h:45
std::unique_ptr< IFunction > create_convolution_layer(ConvolutionLayerNode &node, GraphContext &ctx)
Create a backend convolution layer function.
const PaddingList & padding() const
Padding list accessor.
std::unique_ptr< IFunction > create_upsample_layer(UpsampleLayerNode &node, GraphContext &ctx)
Create a backend Upsample layer function.
#define ARM_COMPUTE_UNUSED(...)
To avoid unused variables warnings.
Definition: Error.h:152
Size2D info() const
Stride info metadata accessor.
std::unique_ptr< IFunction > create_roi_align_layer(ROIAlignLayerNode &node)
Create a backend ROI align layer function.
NodeType type() const override
Returns node's type.
bool is_enabled() const
Enabled parameter accessor.
virtual const TensorShape & tensor_shape() const =0
Size for each dimension of the tensor.
NodeType type() const override
Returns node's type.
Tensor * output(size_t idx) const
Returns the tensor of a given output of the node.
Definition: INode.cpp:158
ActivationLayerInfo fused_activation() const
Returns fused activation.
#define ARM_COMPUTE_ERROR_ON_MSG(cond, msg)
Definition: Error.h:456
const unsigned int num_groups
Definition: Im2Col.cpp:148
NodeType type() const override
Returns node's type.
NodeType type() const override
Returns node's type.
int32_t num_classes() const
Number of classes metadata accessor.
Coordinates starts() const
Start coordinates accessor.
DetectionPostProcessLayerInfo detection_post_process_info() const
DetectionPostProcess metadata accessor.
NodeType type() const override
Returns node's type.
float beta() const
Beta parameter accessor.
Pooling Layer Information struct.
Definition: Types.h:1181
Coordinates ends() const
End coordinates accessor.
void validate_node(const INode &node, size_t num_expected_inputs, size_t num_expected_outputs)
PadStrideInfo convolution_info() const
Convolution metadata accessor.
std::unique_ptr< IFunction > create_priorbox_layer(PriorBoxLayerNode &node)
Create a backend priorbox layer function.
NodeID id() const
Returns node's ID.
Definition: INode.cpp:102
UnaryEltwiseOperation op
Unary element-wise operation to perform.
PriorBox layer info.
Definition: Types.h:806
std::unique_ptr< IFunction > create_detection_output_layer(DetectionOutputLayerNode &node)
Create a backend detection output layer function.
PadStrideInfo convolution_info() const
Convolution metadata accessor.
Padding and stride information class.
Definition: Types.h:689
NodeType type() const override
Returns node's type.
ActivationLayerInfo fused_activation() const
Returns fused activation.
DetectionOutputLayerInfo detection_output_info() const
DetectionOutput metadata accessor.
NodeType type() const override
Returns node's type.
Bounding Box Transform information class.
Definition: Types.h:1450
virtual QuantizationInfo quantization_info() const =0
Get the quantization settings (scale and offset) of the tensor.
NodeType type() const override
Returns node's type.
std::unique_ptr< IFunction > create_detection_post_process_layer(DetectionPostProcessLayerNode &node)
Create a backend detection post process layer function.
Tensor handle interface object.
Definition: ITensorHandle.h:38
std::unique_ptr< IFunction > create_deconvolution_layer(DeconvolutionLayerNode &node, GraphContext &ctx)
Create a backend deconvolution layer function.
std::unique_ptr< IFunction > create_unary_eltwise_layer(UnaryEltwiseLayerNode &node)
Create a backend unary element-wise operation layer function.
bool is_data_type_quantized_asymmetric(DataType dt)
Check if a given data type is of asymmetric quantized type.
Definition: Utils.h:1143
NodeType type() const override
Returns node's type.
bool is_in_place_operation(void *input, void *output)
Checks if an operation is in place.
Definition: Utils.h:77
Strides of an item in bytes.
Definition: Strides.h:37
std::unique_ptr< IFunction > create_print_layer(PrintLayerNode &node)
Create a backend print layer function.
ConvolutionMethod convolution_method() const
Convolution layer method accessor.
NodeType type() const override
Returns node's type.
Detection Output layer info.
Definition: Types.h:943
DetectionPostProcess Layer node.
const BoundingBoxTransformInfo & info() const
BoundingBoxTransformInfo accessor.
std::unique_ptr< IFunction > create_fully_connected_layer(FullyConnectedLayerNode &node, GraphContext &ctx)
Create a backend fully connected layer function.
std::unique_ptr< IFunction > create_activation_layer(ActivationLayerNode &node)
Creates a backend activation layer function.
int axis() const
Stack axis parameter accessor.
ActivationLayerInfo activation_info() const
Activation metadata accessor.
unsigned int pooled_height() const
Get the pooled height of the layer.
Definition: Types.h:1296
ROI Pooling Layer Information class.
Definition: Types.h:1276
NodeType type() const override
Returns node's type.
Class for specifying the size of an image or rectangle.
Definition: Size2D.h:34
std::unique_ptr< IFunction > create_quantization_layer(QuantizationLayerNode &node)
Create a backend quantization layer function.
std::string to_string(const ICLTensor &arg)
InterpolationPolicy upsampling_policy() const
Upsampling policy metadata accessor.
std::shared_ptr< IMemoryManager > get_memory_manager(GraphContext &ctx, Target target)
Returns the memory manager for a given target.
Definition: Utils.h:89
std::unique_ptr< IFunction > create_normalize_planar_yuv_layer(NormalizePlanarYUVLayerNode &node)
Create a backend normalize planar YUV layer function.
#define ARM_COMPUTE_LOG_GRAPH_VERBOSE(x)
Definition: Logger.h:50
ConvolutionMethod
Supported Convolution layer methods.
Definition: Types.h:116
std::unique_ptr< IFunction > create_bounding_box_transform_layer(BoundingBoxTransformLayerNode &node)
Create a backend bounding box transform layer function.
Detection Output layer info.
Definition: Types.h:1062
size_t num_inputs() const
Returns number of inputs of the node.
Definition: INode.cpp:178
const GenerateProposalsInfo & info() const
GenerateProposalsInfo accessor.
ConvertPolicy convert_policy() const
Convert policy accessor.
ActivationLayerInfo activation_info() const
Activation metadata accessor.
TargetInfo::TensorType * get_backing_tensor(arm_compute::graph::Tensor *tensor)
Returns backing tensor of a given tensor.
NodeType type() const override
Returns node's type.
NodeType type() const override
Returns node's type.
ActivationFunction activation() const
Get the type of activation function.
Definition: Types.h:1552
virtual NodeType type() const =0
Returns node's type.
std::shared_ptr< IWeightsManager > get_weights_manager(GraphContext &ctx, Target target)
Returns the weights manager for a given target.
Definition: Utils.h:102
Tensor * input(size_t idx) const
Returns the tensor of a given input of the node.
Definition: INode.cpp:150
NodeType type() const override
Returns node's type.
std::unique_ptr< IFunction > create_pooling_layer(PoolingLayerNode &node)
Create a backend pooling layer function.
NodeType type() const override
Returns node's type.
DataLayout
[DataLayout enum definition]
Definition: Types.h:120
std::unique_ptr< arm_compute::IFunction > create_concatenate_layer(ConcatenateLayerNode &node)
Create a backend layer concatenate function.
ConvertPolicy
Policy to handle overflow.
Definition: Types.h:362
std::unique_ptr< IFunction > create_reshape_layer(ReshapeLayerNode &node)
Create a backend reshape layer function.
size_t get_dimension_idx(DataLayout data_layout, const DataLayoutDimension data_layout_dimension)
Get index of a tensor's given dimension depending on its layout.
Definition: Utils.cpp:129
std::unique_ptr< IFunction > create_yolo_layer(YOLOLayerNode &node, GraphContext &ctx)
Create a backend YOLO layer function.
#define RETURN_UNIQUE_PTR(x)
PriorBoxLayerInfo priorbox_info() const
PriorBox metadata accessor.
std::unique_ptr< IFunction > create_reorg_layer(ReorgLayerNode &node)
Create a backend reorg layer function.
Tensor object.
Definition: Tensor.h:41
std::unique_ptr< IFunction > create_channel_shuffle_layer(ChannelShuffleLayerNode &node)
Create a backend channel shuffle layer function.
InterpolationPolicy policy() const
Interpolation policy accessor.
UnaryEltwiseOperation
Supported Unary Element-wise operations.
Definition: Types.h:110
NodeType type() const override
Returns node's type.