Compute Library
 21.02
FunctionHelpers.h
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2018-2020 Arm Limited.
3  *
4  * SPDX-License-Identifier: MIT
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a copy
7  * of this software and associated documentation files (the "Software"), to
8  * deal in the Software without restriction, including without limitation the
9  * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10  * sell copies of the Software, and to permit persons to whom the Software is
11  * furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in all
14  * copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22  * SOFTWARE.
23  */
24 #ifndef ARM_COMPUTE_GRAPH_BACKENDS_DETAIL_FUNCTION_HELPERS_H
25 #define ARM_COMPUTE_GRAPH_BACKENDS_DETAIL_FUNCTION_HELPERS_H
26 
36 
37 #include "arm_compute/core/Error.h"
40 #include "support/Cast.h"
41 
42 namespace arm_compute
43 {
44 namespace graph
45 {
46 namespace backends
47 {
48 namespace detail
49 {
50 /** Returns backing tensor of a given tensor
51  *
52  * @tparam TargetInfo Target information
53  *
54  * @param[in] tensor Tensor to extract the backing tensor from
55  *
56  * @return Backing tensor if present else nullptr
57  */
58 template <typename TargetInfo>
60 {
61  typename TargetInfo::TensorType *backing_tensor = nullptr;
62  if(tensor != nullptr)
63  {
64  ARM_COMPUTE_ERROR_ON(tensor->desc().target != TargetInfo::TargetType);
65  // Get backing tensor handle
66  ITensorHandle *tensor_handle = tensor->handle();
67  // Get backing tensor
68  backing_tensor = (tensor_handle != nullptr) ? arm_compute::utils::cast::polymorphic_cast<typename TargetInfo::TensorType *>(&tensor_handle->tensor()) : nullptr;
69  }
70 
71  return backing_tensor;
72 }
73 
74 template <typename TargetInfo>
75 void validate_node(const INode &node, size_t num_expected_inputs, size_t num_expected_outputs)
76 {
77  ARM_COMPUTE_LOG_GRAPH_VERBOSE("Creating " << node.type()
78  << " Target: " << TargetInfo::TargetType
79  << " ID: " << node.id()
80  << node.name()
81  << std::endl);
82 
83  ARM_COMPUTE_ERROR_ON(TargetInfo::TargetType != node.assigned_target());
84  ARM_COMPUTE_ERROR_ON(node.num_inputs() != num_expected_inputs);
85  ARM_COMPUTE_ERROR_ON(node.num_outputs() != num_expected_outputs);
86  ARM_COMPUTE_UNUSED(node, num_expected_inputs, num_expected_outputs);
87 }
88 
89 /** Creates a backend activation layer function
90  *
91  * @tparam ActivationLayerFunction Backend activation function
92  * @tparam TargetInfo Target-specific information
93  *
94  * @param[in] node Node to create the backend function for
95  *
96  * @return Backend activation layer function
97  */
98 template <typename ActivationLayerFunction, typename TargetInfo>
99 std::unique_ptr<IFunction> create_activation_layer(ActivationLayerNode &node)
100 {
101  validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
102 
103  // Extract IO and info
104  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
105  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
106  const ActivationLayerInfo act_info = node.activation_info();
107 
108  // Create function
109  auto func = std::make_unique<ActivationLayerFunction>();
110  func->configure(input, output, act_info);
111 
112  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
113  << node.name()
114  << " Type: " << node.type()
115  << " Target: " << TargetInfo::TargetType
116  << " Data Type: " << input->info()->data_type()
117  << " Shape: " << input->info()->tensor_shape()
118  << " Activation function: " << act_info.activation()
119  << " a: " << act_info.a()
120  << " b: " << act_info.b()
121  << " InPlace : " << is_in_place_operation(input, output)
122  << std::endl);
123 
124  return std::move(func);
125 }
126 
127 /** Creates a backend argminmax layer function
128  *
129  * @tparam ArgMinMaxLayerFunction Backend activation function
130  * @tparam TargetInfo Target-specific information
131  *
132  * @param[in] node Node to create the backend function for
133  *
134  * @return Backend argminmax layer function
135  */
136 template <typename ArgMinMaxLayerFunction, typename TargetInfo>
137 std::unique_ptr<IFunction> create_arg_min_max_layer(ArgMinMaxLayerNode &node)
138 {
139  validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
140 
141  // Extract IO and info
142  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
143  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
144  const ReductionOperation op = node.reduction_operation();
145  unsigned int axis = node.axis();
146 
147  // Create function
148  auto func = std::make_unique<ArgMinMaxLayerFunction>();
149  func->configure(input, axis, output, op);
150 
151  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
152  << node.name()
153  << " Type: " << node.type()
154  << " Target: " << TargetInfo::TargetType
155  << " Data Type: " << input->info()->data_type()
156  << " Shape: " << input->info()->tensor_shape()
157  << " Reduction Operation: " << op
158  << " axis: " << axis
159  << std::endl);
160 
161  return std::move(func);
162 }
163 
164 /** Create a backend batch normalization layer function
165  *
166  * @tparam BatchNormalizationLayerFunction Backend batch normalization function
167  * @tparam TargetInfo Target-specific information
168  *
169  * @param[in] node Node to create the backend function for
170  *
171  * @return Backend batch normalization layer function
172  */
173 template <typename BatchNormalizationLayerFunction, typename TargetInfo>
175 {
176  validate_node<TargetInfo>(node, 5 /* expected inputs */, 1 /* expected outputs */);
177 
178  // Extract IO and info
179  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
180  typename TargetInfo::TensorType *mean = get_backing_tensor<TargetInfo>(node.input(1));
181  typename TargetInfo::TensorType *var = get_backing_tensor<TargetInfo>(node.input(2));
182  typename TargetInfo::TensorType *beta = get_backing_tensor<TargetInfo>(node.input(3));
183  typename TargetInfo::TensorType *gamma = get_backing_tensor<TargetInfo>(node.input(4));
184 
185  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
186  const float epsilon = node.epsilon();
187  const ActivationLayerInfo fused_act = node.fused_activation();
188 
189  // Create and configure function
190  auto func = std::make_unique<BatchNormalizationLayerFunction>();
191  func->configure(input, output, mean, var, beta, gamma, epsilon, fused_act);
192 
193  // Log info
194  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
195  << node.name()
196  << " Type: " << node.type()
197  << " Target: " << TargetInfo::TargetType
198  << " Data Type: " << input->info()->data_type()
199  << " Shape: " << input->info()->tensor_shape()
200  << " Epsilon: " << epsilon << " "
201  << (fused_act.enabled() ? to_string(fused_act.activation()) : "")
202  << " InPlace: " << is_in_place_operation(input, output)
203  << std::endl);
204 
205  return std::move(func);
206 }
207 
208 /** Create a backend batch normalization layer function
209  *
210  * @tparam BatchNormalizationLayerFunction Backend batch normalization function
211  * @tparam TargetInfo Target-specific information
212  *
213  * @param[in] node Node to create the backend function for
214  * @param[in] ctx Graph context
215  *
216  * @return Backend batch normalization layer function
217  */
218 template <typename FusedLayerTypes, typename TargetInfo>
220 {
221  validate_node<TargetInfo>(node, 7 /* expected inputs */, 1 /* expected outputs */);
222 
223  // Extract IO and info
224  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
225  typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
226  typename TargetInfo::TensorType *biases = get_backing_tensor<TargetInfo>(node.input(2));
227  typename TargetInfo::TensorType *mean = get_backing_tensor<TargetInfo>(node.input(3));
228  typename TargetInfo::TensorType *var = get_backing_tensor<TargetInfo>(node.input(4));
229  typename TargetInfo::TensorType *beta = get_backing_tensor<TargetInfo>(node.input(5));
230  typename TargetInfo::TensorType *gamma = get_backing_tensor<TargetInfo>(node.input(6));
231 
232  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
233 
234  const PadStrideInfo conv_info = node.convolution_info();
235  const unsigned int num_groups = node.num_groups();
236  const bool fast_math = node.fast_math_hint() == FastMathHint::Enabled;
237  const ActivationLayerInfo fused_act = node.fused_activation();
238  const float epsilon = node.epsilon();
239 
240  // Create and configure function (we assume that functions have been validated before creation)
241  std::shared_ptr<IMemoryManager> mm = get_memory_manager(ctx, TargetInfo::TargetType);
242  std::unique_ptr<IFunction> func;
243  std::string func_name;
244 
246 
247  // Create and configure function
248  std::tie(func, func_name) = create_named_memory_managed_function<FType>(
249  std::string("FusedConvolutionBatchNormalizationLayer"), mm, input, weights, biases, output, mean, var, beta, gamma, epsilon, conv_info, num_groups, fast_math, fused_act);
250 
251  // Log info
252  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
253  << node.name()
254  << " Type: " << node.type()
255  << " Target: " << TargetInfo::TargetType
256  << " Data Type: " << input->info()->data_type()
257  << " Input shape: " << input->info()->tensor_shape()
258  << " Weights shape: " << weights->info()->tensor_shape()
259  << " Output shape: " << output->info()->tensor_shape()
260  << (fused_act.enabled() ? " " + to_string(fused_act.activation()) : "")
261  << std::endl);
262  return std::move(func);
263 }
264 
265 /** Create a backend fused depthwise convolution batch normalization layer function
266  *
267  * @tparam FusedLayerTypes Fused layer types
268  * @tparam TargetInfo Target-specific information
269  *
270  * @param[in] node Node to create the backend function for
271  * @param[in] ctx Graph context
272  *
273  * @return Backend fused depthwise convolution batch normalization layer function
274  */
275 template <typename FusedLayerTypes, typename TargetInfo>
277 {
278  validate_node<TargetInfo>(node, 7 /* expected inputs */, 1 /* expected outputs */);
279 
280  // Extract IO and info
281  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
282  typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
283  typename TargetInfo::TensorType *biases = get_backing_tensor<TargetInfo>(node.input(2));
284  typename TargetInfo::TensorType *mean = get_backing_tensor<TargetInfo>(node.input(3));
285  typename TargetInfo::TensorType *var = get_backing_tensor<TargetInfo>(node.input(4));
286  typename TargetInfo::TensorType *beta = get_backing_tensor<TargetInfo>(node.input(5));
287  typename TargetInfo::TensorType *gamma = get_backing_tensor<TargetInfo>(node.input(6));
288 
289  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
290 
291  const PadStrideInfo conv_info = node.convolution_info();
292  const unsigned int depth_multiplier = node.depth_multiplier();
293  const ActivationLayerInfo fused_act = node.fused_activation();
294  const float epsilon = node.epsilon();
295 
296  // Create and configure function (we assume that functions have been validated before creation)
297  std::shared_ptr<IMemoryManager> mm = get_memory_manager(ctx, TargetInfo::TargetType);
298  std::unique_ptr<IFunction> func;
299  std::string func_name;
300 
302 
303  // Create and configure function
304  std::tie(func, func_name) = create_named_memory_managed_function<FType>(
305  std::string("FusedDepthwiseConvolutionBatchNormalizationLayer"), mm, input, weights, biases, output, mean, var, beta, gamma, epsilon, conv_info, depth_multiplier, fused_act);
306 
307  // Log info
308  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
309  << node.name()
310  << " Type: " << node.type()
311  << " Target: " << TargetInfo::TargetType
312  << " Data Type: " << input->info()->data_type()
313  << " Input shape: " << input->info()->tensor_shape()
314  << " Weights shape: " << weights->info()->tensor_shape()
315  << " Output shape: " << output->info()->tensor_shape()
316  << (fused_act.enabled() ? " " + to_string(fused_act.activation()) : "")
317  << std::endl);
318  return std::move(func);
319 }
320 
321 /** Create a backend bounding box transform layer function
322  *
323  * @tparam BoundingBoxTransformLayerFunction Backend bounding box transform function
324  * @tparam TargetInfo Target-specific information
325  *
326  * @param[in] node Node to create the backend function for
327  *
328  * @return Backend bounding box transform layer function
329  */
330 template <typename BoundingBoxTransformLayerFunction, typename TargetInfo>
332 {
333  validate_node<TargetInfo>(node, 2 /* expected inputs */, 1 /* expected outputs */);
334 
335  // Extract IO and info
336  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
337  typename TargetInfo::TensorType *deltas = get_backing_tensor<TargetInfo>(node.input(1));
338  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
339  const BoundingBoxTransformInfo bbox_info = node.info();
340 
341  // Create and configure function
342  auto func = std::make_unique<BoundingBoxTransformLayerFunction>();
343  func->configure(input, output, deltas, bbox_info);
344 
345  // Log info
346  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
347  << node.name()
348  << " Type: " << node.type()
349  << " Target: " << TargetInfo::TargetType
350  << " Data Type: " << input->info()->data_type()
351  << " Shape: " << input->info()->tensor_shape()
352  << " BoundingBox Info img W: " << bbox_info.img_width() << " "
353  << " BoundingBox Info img H: " << bbox_info.img_height() << " "
354  << std::endl);
355 
356  return std::move(func);
357 }
358 
359 /** Create a backend channel shuffle layer function
360  *
361  * @tparam ChannelShuffleLayerFunction Backend channel shuffle function
362  * @tparam TargetInfo Target-specific information
363  *
364  * @param[in] node Node to create the backend function for
365  *
366  * @return Backend channel shuffle layer function
367  */
368 template <typename ChannelShuffleLayerFunction, typename TargetInfo>
369 std::unique_ptr<IFunction> create_channel_shuffle_layer(ChannelShuffleLayerNode &node)
370 {
371  validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
372 
373  // Extract IO and info
374  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
375  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
376  const unsigned int num_groups = node.num_groups();
377 
378  // Create function
379  auto func = std::make_unique<ChannelShuffleLayerFunction>();
380  func->configure(input, output, num_groups);
381 
382  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
383  << node.name()
384  << " Type: " << node.type()
385  << " Target: " << TargetInfo::TargetType
386  << " Data Type: " << input->info()->data_type()
387  << " Shape: " << input->info()->tensor_shape()
388  << " Num groups: " << num_groups
389  << std::endl);
390 
391  return std::move(func);
392 }
393 
394 /** Create a backend layer concatenate function
395  *
396  * @tparam ConcatenateLayerFunction Backend concatenate function
397  * @tparam TargetInfo Target-specific information
398  *
399  * @param[in] node Node to create the backend function for
400  *
401  * @return Backend concatenate layer function
402  */
403 template <typename ConcatenateLayerFunction, typename TargetInfo>
404 std::unique_ptr<arm_compute::IFunction> create_concatenate_layer(ConcatenateLayerNode &node)
405 {
406  ARM_COMPUTE_LOG_GRAPH_VERBOSE("Creating Concatenate node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
407  ARM_COMPUTE_ERROR_ON(node.num_outputs() != 1);
408 
409  // Return nullptr if depth concatenate is switched off
410  if(!node.is_enabled())
411  {
412  return nullptr;
413  }
414 
415  // Extract IO and info
416  std::vector<typename TargetInfo::SrcTensorType *> inputs;
417  for(unsigned int i = 0; i < node.num_inputs(); ++i)
418  {
419  inputs.push_back(get_backing_tensor<TargetInfo>(node.input(i)));
420  }
421  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
422  const DataLayout data_layout = node.output(0) != nullptr ? node.output(0)->desc().layout : DataLayout::UNKNOWN;
423  const size_t concat_axis = get_dimension_idx(data_layout, node.concatenation_axis());
424 
425  // Create and configure function
426  auto func = std::make_unique<ConcatenateLayerFunction>();
427  func->configure(inputs, output, concat_axis);
428 
429  // Log info
430  const bool is_quantized = is_data_type_quantized_asymmetric(output->info()->data_type());
431  std::ostringstream qss;
432  if(is_quantized)
433  {
434  qss << " Output QuantInfo: " << output->info()->quantization_info();
435  }
436  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
437  << node.name()
438  << " Type: " << node.type()
439  << " Target: " << TargetInfo::TargetType
440  << " Data Type: " << output->info()->data_type()
441  << " Shape: " << output->info()->tensor_shape()
442  << " Num Inputs: " << inputs.size()
443  << " Axis: " << concat_axis
444  << qss.str()
445  << std::endl);
446 
447  return std::move(func);
448 }
449 
450 /** Create a backend convolution layer function
451  *
452  * @tparam ConvolutionLayerFunctions Backend convolution functions
453  * @tparam TargetInfo Target-specific information
454  *
455  * @param[in] node Node to create the backend function for
456  * @param[in] ctx Graph context
457  *
458  * @return Backend convolution layer function
459  */
460 template <typename ConvolutionLayerFunctions, typename TargetInfo>
461 std::unique_ptr<IFunction> create_convolution_layer(ConvolutionLayerNode &node, GraphContext &ctx)
462 {
463  validate_node<TargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);
464 
465  // Extract IO and info
466  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
467  typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
468  typename TargetInfo::TensorType *biases = get_backing_tensor<TargetInfo>(node.input(2));
469  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
470 
471  const bool is_quantized = is_data_type_quantized_asymmetric(input->info()->data_type());
472 
473  if(is_quantized)
474  {
475  biases->info()->set_data_type(DataType::S32);
476  }
477 
478  const PadStrideInfo conv_info = node.convolution_info();
479  const unsigned int num_groups = node.num_groups();
480  const ConvolutionMethod conv_algorithm = node.convolution_method();
481  const bool fast_math = node.fast_math_hint() == FastMathHint::Enabled;
482  const ActivationLayerInfo fused_act = node.fused_activation();
483 
484  // Create and configure function (we assume that functions have been validated before creation)
485  std::shared_ptr<IMemoryManager> mm = get_memory_manager(ctx, TargetInfo::TargetType);
486  std::unique_ptr<IFunction> func;
487  std::string func_name;
488 
489  if(conv_algorithm == ConvolutionMethod::Winograd)
490  {
491  ARM_COMPUTE_ERROR_ON_MSG(num_groups != 1, "WinogradConvolutionLayer does not support grouping!");
492  std::tie(func, func_name) = create_named_memory_managed_function<typename ConvolutionLayerFunctions::WinogradConvolutionLayer>(
493  std::string("WinogradConvolutionLayer"), mm,
494  input, weights, biases, output, conv_info, fused_act, fast_math);
495  }
496  else if(conv_algorithm == ConvolutionMethod::Direct)
497  {
498  ARM_COMPUTE_ERROR_ON_MSG(num_groups != 1, "DirectConvolutionLayer does not support grouping!");
499  std::tie(func, func_name) = create_named_function<typename ConvolutionLayerFunctions::DirectConvolutionLayer>(
500  std::string("DirectConvolutionLayer"),
501  input, weights, biases, output, conv_info, fused_act);
502  }
503  else if(conv_algorithm == ConvolutionMethod::GEMM)
504  {
505  std::tie(func, func_name) = create_named_memory_managed_function<typename ConvolutionLayerFunctions::GEMMConvolutionLayer>(
506  std::string("GEMMConvolutionLayer"), mm,
507  input, weights, biases, output, conv_info,
508  WeightsInfo(), Size2D(1U, 1U), fused_act, num_groups);
509  }
510  else
511  {
512  std::tie(func, func_name) = create_named_memory_managed_function<typename ConvolutionLayerFunctions::GenericConvolutionLayer>(
513  std::string("GenericConvolutionLayer"), mm,
514  input, weights, biases, output, conv_info,
515  WeightsInfo(), Size2D(1U, 1U), fused_act, fast_math, num_groups);
516  }
517 
518  // Log info
519  std::ostringstream qss;
520  if(is_quantized)
521  {
522  qss << " Input QuantInfo: " << input->info()->quantization_info()
523  << " Weights QuantInfo: " << weights->info()->quantization_info()
524  << " Output QuantInfo: " << output->info()->quantization_info();
525  }
526  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
527  << node.name()
528  << " Type: " << func_name
529  << " Target: " << TargetInfo::TargetType
530  << " Data Type: " << input->info()->data_type()
531  << " Groups: " << num_groups
532  << " Input shape: " << input->info()->tensor_shape()
533  << " Weights shape: " << weights->info()->tensor_shape()
534  << " Output shape: " << output->info()->tensor_shape()
535  << qss.str()
536  << (fused_act.enabled() ? " " + to_string(fused_act.activation()) : "")
537  << std::endl);
538  return std::move(func);
539 }
540 
541 /** Create a backend deconvolution layer function
542  *
543  * @tparam DeconvolutionLayerFunction Backend deconvolution function
544  * @tparam TargetInfo Target-specific information
545  *
546  * @param[in] node Node to create the backend function for
547  * @param[in] ctx Graph context
548  *
549  * @return Backend deconvolution layer function
550  */
551 template <typename DeconvolutionLayerFunction, typename TargetInfo>
552 std::unique_ptr<IFunction> create_deconvolution_layer(DeconvolutionLayerNode &node, GraphContext &ctx)
553 {
554  validate_node<TargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);
555 
556  // Extract IO and info
557  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
558  typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
559  typename TargetInfo::TensorType *biases = get_backing_tensor<TargetInfo>(node.input(2));
560  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
561 
562  const PadStrideInfo deconv_info = node.deconvolution_info();
563 
564  // Create and configure function (we assume that functions have been validated before creation)
565  std::shared_ptr<IMemoryManager> mm = get_memory_manager(ctx, TargetInfo::TargetType);
566  std::unique_ptr<IFunction> func;
567 
568  std::tie(func, std::ignore) = create_named_memory_managed_function<DeconvolutionLayerFunction>(
569  std::string(), mm,
570  input, weights, biases, output, deconv_info);
571 
572  // Log info
573  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
574  << node.name()
575  << " Type: " << node.type()
576  << " Target: " << TargetInfo::TargetType
577  << " Data Type: " << input->info()->data_type()
578  << " Input shape: " << input->info()->tensor_shape()
579  << " Weights shape: " << weights->info()->tensor_shape()
580  << " Output shape: " << output->info()->tensor_shape()
581  << std::endl);
582  return func;
583 }
584 
585 /** Create a backend layer depth-wise convolution function
586  *
587  * @tparam DepthwiseConvolutionLayerFunctions Backend depthwise convolution function
588  * @tparam TargetInfo Target-specific information
589  *
590  * @param[in] node Node to create the backend function for
591  *
592  * @return Backend depth-wise convolution layer function
593  */
594 template <typename DepthwiseConvolutionLayer, typename TargetInfo>
596 {
597  validate_node<TargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);
598 
599  // Extract IO and info
600  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
601  typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
602  typename TargetInfo::TensorType *biases = get_backing_tensor<TargetInfo>(node.input(2));
603  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
604 
605  const bool is_quantized = is_data_type_quantized_asymmetric(input->info()->data_type());
606 
607  if(is_quantized)
608  {
609  biases->info()->set_data_type(DataType::S32);
610  }
611 
612  const PadStrideInfo conv_info = node.convolution_info();
613  const unsigned int depth_multiplier = node.depth_multiplier();
614  const ActivationLayerInfo fused_act = node.fused_activation();
615 
616  // Create and configure function (we assume that functions have been validated before creation)
617  std::unique_ptr<IFunction> func;
618  std::string func_name;
619 
620  std::tie(func, func_name) = create_named_function<DepthwiseConvolutionLayer>(
621  std::string("DepthwiseConvolutionLayer"),
622  input, weights, biases, output, conv_info, depth_multiplier, fused_act);
623 
624  // Log info
625  std::ostringstream qss;
626  if(is_quantized)
627  {
628  qss << " Input QuantInfo: " << input->info()->quantization_info()
629  << " Weights QuantInfo: " << weights->info()->quantization_info()
630  << " Output QuantInfo: " << output->info()->quantization_info();
631  }
632  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
633  << node.name()
634  << " Type: " << func_name
635  << " Target: " << TargetInfo::TargetType
636  << " Data Type: " << input->info()->data_type()
637  << " Input shape: " << input->info()->tensor_shape()
638  << " Weights shape: " << weights->info()->tensor_shape()
639  << " Output shape: " << output->info()->tensor_shape()
640  << " Depth multiplier: " << depth_multiplier
641  << qss.str()
642  << (fused_act.enabled() ? " " + to_string(fused_act.activation()) : "")
643  << std::endl);
644  return std::move(func);
645 }
646 
647 /** Create a backend depth to space layer function
648  *
649  * @tparam DepthToSpaceLayerNode Function Backend depth to space function
650  * @tparam TargetInfo Target-specific information
651  *
652  * @param[in] node Node to create the backend function for
653  *
654  * @return Backend depth to space layer function
655  */
656 template <typename DepthToSpaceLayerFunction, typename TargetInfo>
657 std::unique_ptr<IFunction> create_depth_to_space_layer(DepthToSpaceLayerNode &node)
658 {
659  validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
660 
661  // Extract IO and info
662  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
663  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
664 
665  ARM_COMPUTE_ERROR_ON(input == nullptr);
666  ARM_COMPUTE_ERROR_ON(output == nullptr);
667 
668  // Create and configure function
669  auto func = std::make_unique<DepthToSpaceLayerFunction>();
670  func->configure(input, output, node.block_shape());
671 
672  // Log info
673  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
674  << node.name()
675  << " Type: " << node.type()
676  << " Target: " << TargetInfo::TargetType
677  << " Data Type: " << input->info()->data_type()
678  << " Input shape: " << input->info()->tensor_shape()
679  << " Block Size: " << node.block_shape()
680  << " Output shape: " << output->info()->tensor_shape()
681  << std::endl);
682 
683  return std::move(func);
684 }
685 
686 /** Create a backend dequantize layer function
687  *
688  * @tparam DequantizationLayer Function Backend dequantize function
689  * @tparam TargetInfo Target-specific information
690  *
691  * @param[in] node Node to create the backend function for
692  *
693  * @return Backend dequantize layer function
694  */
695 template <typename DequantizationLayerFunction, typename TargetInfo>
696 std::unique_ptr<IFunction> create_dequantization_layer(DequantizationLayerNode &node)
697 {
698  validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
699 
700  // Extract IO and info
701  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
702  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
703 
704  ARM_COMPUTE_ERROR_ON(input == nullptr);
705  ARM_COMPUTE_ERROR_ON(output == nullptr);
706 
707  // Create and configure function
708  auto func = std::make_unique<DequantizationLayerFunction>();
709  func->configure(input, output);
710 
711  // Log info
712  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
713  << node.name()
714  << " Type: " << node.type()
715  << " Target: " << TargetInfo::TargetType
716  << " Data Type: " << input->info()->data_type()
717  << " Input shape: " << input->info()->tensor_shape()
718  << " Input quantization info: " << output->info()->quantization_info()
719  << " Output shape: " << output->info()->tensor_shape()
720  << std::endl);
721 
722  return std::move(func);
723 }
724 /** Create a backend detection output layer function
725  *
726  * @tparam DetectionOutputLayer Function Backend detection output function
727  * @tparam TargetInfo Target-specific information
728  *
729  * @param[in] node Node to create the backend function for
730  *
731  * @return Backend detection output layer function
732  */
733 template <typename DetectionOutputLayerFunction, typename TargetInfo>
735 {
736  validate_node<TargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);
737 
738  // Extract IO and info
739  typename TargetInfo::TensorType *input0 = get_backing_tensor<TargetInfo>(node.input(0));
740  typename TargetInfo::TensorType *input1 = get_backing_tensor<TargetInfo>(node.input(1));
741  typename TargetInfo::TensorType *input2 = get_backing_tensor<TargetInfo>(node.input(2));
742  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
743  const DetectionOutputLayerInfo detect_info = node.detection_output_info();
744 
745  ARM_COMPUTE_ERROR_ON(input0 == nullptr);
746  ARM_COMPUTE_ERROR_ON(input1 == nullptr);
747  ARM_COMPUTE_ERROR_ON(input2 == nullptr);
748  ARM_COMPUTE_ERROR_ON(output == nullptr);
749 
750  // Create and configure function
751  auto func = std::make_unique<DetectionOutputLayerFunction>();
752  func->configure(input0, input1, input2, output, detect_info);
753 
754  // Log info
755  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
756  << node.name()
757  << " Type: " << node.type()
758  << " Target: " << TargetInfo::TargetType
759  << " Data Type: " << input0->info()->data_type()
760  << " Input0 shape: " << input0->info()->tensor_shape()
761  << " Input1 shape: " << input1->info()->tensor_shape()
762  << " Input2 shape: " << input2->info()->tensor_shape()
763  << " Output shape: " << output->info()->tensor_shape()
764  << " DetectionOutputLayer info: " << detect_info
765  << std::endl);
766 
767  return std::move(func);
768 }
769 
770 /** Create a backend detection post process layer function
771  *
772  * @tparam DetectionPostProcessLayerFunction Backend detection output function
773  * @tparam TargetInfo Target-specific information
774  *
775  * @param[in] node Node to create the backend function for
776  *
777  * @return Backend detection post process layer function
778  */
779 template <typename DetectionPostProcessLayerFunction, typename TargetInfo>
781 {
782  validate_node<TargetInfo>(node, 3 /* expected inputs */, 4 /* expected outputs */);
783 
784  // Extract IO and info
785  typename TargetInfo::TensorType *input0 = get_backing_tensor<TargetInfo>(node.input(0));
786  typename TargetInfo::TensorType *input1 = get_backing_tensor<TargetInfo>(node.input(1));
787  typename TargetInfo::TensorType *input2 = get_backing_tensor<TargetInfo>(node.input(2));
788  typename TargetInfo::TensorType *output0 = get_backing_tensor<TargetInfo>(node.output(0));
789  typename TargetInfo::TensorType *output1 = get_backing_tensor<TargetInfo>(node.output(1));
790  typename TargetInfo::TensorType *output2 = get_backing_tensor<TargetInfo>(node.output(2));
791  typename TargetInfo::TensorType *output3 = get_backing_tensor<TargetInfo>(node.output(3));
792  const DetectionPostProcessLayerInfo detect_info = node.detection_post_process_info();
793 
794  ARM_COMPUTE_ERROR_ON(input0 == nullptr);
795  ARM_COMPUTE_ERROR_ON(input1 == nullptr);
796  ARM_COMPUTE_ERROR_ON(input2 == nullptr);
797  ARM_COMPUTE_ERROR_ON(output0 == nullptr);
798  ARM_COMPUTE_ERROR_ON(output1 == nullptr);
799  ARM_COMPUTE_ERROR_ON(output2 == nullptr);
800  ARM_COMPUTE_ERROR_ON(output3 == nullptr);
801 
802  // Create and configure function
803  auto func = std::make_unique<DetectionPostProcessLayerFunction>();
804  func->configure(input0, input1, input2, output0, output1, output2, output3, detect_info);
805 
806  // Log info
807  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
808  << node.name()
809  << " Type: " << node.type()
810  << " Target: " << TargetInfo::TargetType
811  << " Data Type: " << input0->info()->data_type()
812  << " Input0 shape: " << input0->info()->tensor_shape()
813  << " Input1 shape: " << input1->info()->tensor_shape()
814  << " Input2 shape: " << input2->info()->tensor_shape()
815  << " Output0 shape: " << output0->info()->tensor_shape()
816  << " Output1 shape: " << output1->info()->tensor_shape()
817  << " Output2 shape: " << output2->info()->tensor_shape()
818  << " Output3 shape: " << output3->info()->tensor_shape()
819  << " DetectionPostProcessLayer info: " << detect_info
820  << std::endl);
821 
822  return std::move(func);
823 }
824 
825 /** Create a backend element-wise operation layer function
826  *
827  * @tparam EltwiseFunctions Backend element-wise function
828  * @tparam TargetInfo Target-specific information
829  *
830  * @param[in] node Node to create the backend function for
831  *
832  * @return Backend element-wise operation layer function
833  */
834 template <typename EltwiseFunctions, typename TargetInfo>
835 std::unique_ptr<IFunction> create_eltwise_layer(EltwiseLayerNode &node)
836 {
837  validate_node<TargetInfo>(node, 2 /* expected inputs */, 1 /* expected outputs */);
838 
839  // Extract IO and info
840  typename TargetInfo::TensorType *input1 = get_backing_tensor<TargetInfo>(node.input(0));
841  typename TargetInfo::TensorType *input2 = get_backing_tensor<TargetInfo>(node.input(1));
842  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
843  const EltwiseOperation eltwise_op = node.eltwise_operation();
844  const ConvertPolicy convert_policy = node.convert_policy();
845  const ActivationLayerInfo act_info = node.fused_activation();
846  ARM_COMPUTE_ERROR_ON(input1 == nullptr);
847  ARM_COMPUTE_ERROR_ON(input2 == nullptr);
848  ARM_COMPUTE_ERROR_ON(output == nullptr);
849 
850  std::unique_ptr<IFunction> func = nullptr;
851  std::string func_name;
852  if(eltwise_op == EltwiseOperation::Add)
853  {
854  std::tie(func, func_name) = create_named_function<typename EltwiseFunctions::Addition>(
855  std::string("ArithmeticAddition"),
856  input1, input2, output, convert_policy, act_info);
857  }
858  else if(eltwise_op == EltwiseOperation::Sub)
859  {
860  std::tie(func, func_name) = create_named_function<typename EltwiseFunctions::Subtraction>(
861  std::string("ArithmeticSubtraction"),
862  input1, input2, output, convert_policy, act_info);
863  }
864  else if(eltwise_op == EltwiseOperation::Mul)
865  {
866  std::tie(func, func_name) = create_named_function<typename EltwiseFunctions::Multiplication>(
867  std::string("PixelWiseMultiplication"),
868  input1, input2, output, 1.f, convert_policy, node.rounding_policy(), act_info);
869  }
870  else if(eltwise_op == EltwiseOperation::Max)
871  {
872  std::tie(func, func_name) = create_named_function<typename EltwiseFunctions::Maximum>(
873  std::string("ElementwiseMaximum"),
874  input1, input2, output, act_info);
875  }
876  else
877  {
878  ARM_COMPUTE_ERROR("Unsupported element-wise operation!");
879  }
880 
881  // Log info
882  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
883  << node.name()
884  << " Type: " << node.type()
885  << " Target: " << TargetInfo::TargetType
886  << " Operation: " << func_name
887  << " Data Type: " << input1->info()->data_type()
888  << " Shape: " << input1->info()->tensor_shape()
889  << std::endl);
890 
891  return std::move(func);
892 }
893 
894 /** Create a backend unary element-wise operation layer function
895  *
896  * @tparam UnaryEltwiseFunctions Backend unary element-wise function
897  * @tparam TargetInfo Target-specific information
898  *
899  * @param[in] node Node to create the backend function for
900  *
901  * @return Backend unary element-wise operation layer function
902  */
903 template <typename UnaryEltwiseFunctions, typename TargetInfo>
904 std::unique_ptr<IFunction> create_unary_eltwise_layer(UnaryEltwiseLayerNode &node)
905 {
906  validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
907 
908  // Extract IO and info
909  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
910  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
911  const UnaryEltwiseOperation eltwise_op = node.eltwise_descriptor().op;
912 
913  ARM_COMPUTE_ERROR_ON(input == nullptr);
914  ARM_COMPUTE_ERROR_ON(output == nullptr);
915 
916  std::unique_ptr<IFunction> func = nullptr;
917  std::string func_name;
918  if(eltwise_op == UnaryEltwiseOperation::Exp)
919  {
920  std::tie(func, func_name) = create_named_function<typename UnaryEltwiseFunctions::Exp>(
921  std::string("Exp"),
922  input, output);
923  }
924  else
925  {
926  ARM_COMPUTE_ERROR("Unsupported unary element-wise operation!");
927  }
928 
929  // Log info
930  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
931  << node.name()
932  << " Type: " << node.type()
933  << " Target: " << TargetInfo::TargetType
934  << " Operation: " << func_name
935  << " Data Type: " << input->info()->data_type()
936  << " Shape: " << input->info()->tensor_shape()
937  << std::endl);
938 
939  return std::move(func);
940 }
941 
942 /** Create a backend flatten layer function
943  *
944  * @tparam FlattenLayerFunction Backend flatten function
945  * @tparam TargetInfo Target-specific information
946  *
947  * @param[in] node Node to create the backend function for
948  *
949  * @return Backend flatten layer function
950  */
951 template <typename FlattenLayerFunction, typename TargetInfo>
952 std::unique_ptr<IFunction> create_flatten_layer(FlattenLayerNode &node)
953 {
954  validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
955 
956  // Extract IO and info
957  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
958  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
959 
960  ARM_COMPUTE_ERROR_ON(input == nullptr);
961  ARM_COMPUTE_ERROR_ON(output == nullptr);
962 
963  // Create and configure function
964  auto func = std::make_unique<FlattenLayerFunction>();
965  func->configure(input, output);
966 
967  // Log info
968  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
969  << node.name()
970  << " Type: " << node.type()
971  << " Target: " << TargetInfo::TargetType
972  << " Data Type: " << input->info()->data_type()
973  << " Input shape: " << input->info()->tensor_shape()
974  << " Output shape: " << output->info()->tensor_shape()
975  << std::endl);
976 
977  return std::move(func);
978 }
979 
980 /** Create a backend fully connected layer function
981  *
982  * @tparam FullyConnectedLayerFunction Backend fully-connected function
983  * @tparam TargetInfo Target-specific information
984  *
985  * @param[in] node Node to create the backend function for
986  * @param[in] ctx Graph context
987  *
988  * @return Backend fully connected layer function
989  */
990 template <typename FullyConnectedLayerFunction, typename TargetInfo>
992 {
993  validate_node<TargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);
994 
995  // Extract IO and info
996  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
997  typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
998  typename TargetInfo::TensorType *biases = get_backing_tensor<TargetInfo>(node.input(2));
999  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1000  const FullyConnectedLayerInfo fc_info = node.info();
1001 
1002  ARM_COMPUTE_ERROR_ON(input == nullptr);
1003  ARM_COMPUTE_ERROR_ON(weights == nullptr);
1004  ARM_COMPUTE_ERROR_ON(output == nullptr);
1005 
1006  // Create and configure function
1007  auto wm = get_weights_manager(ctx, TargetInfo::TargetType);
1008  auto mm = get_memory_manager(ctx, TargetInfo::TargetType);
1009  auto func = std::make_unique<FullyConnectedLayerFunction>(mm, wm.get());
1010  func->configure(input, weights, biases, output, fc_info);
1011 
1012  const bool is_quantized = is_data_type_quantized_asymmetric(input->info()->data_type());
1013 
1014  // Log info
1015  std::ostringstream qss;
1016  if(is_quantized)
1017  {
1018  qss << " Input QuantInfo: " << input->info()->quantization_info()
1019  << " Weights QuantInfo: " << weights->info()->quantization_info()
1020  << " Output QuantInfo: " << output->info()->quantization_info();
1021  }
1022  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1023  << node.name()
1024  << " Type: " << node.type()
1025  << " Target: " << TargetInfo::TargetType
1026  << " Data Type: " << input->info()->data_type()
1027  << qss.str()
1028  << " Input shape: " << input->info()->tensor_shape()
1029  << " Weights shape: " << weights->info()->tensor_shape()
1030  << " Output shape: " << output->info()->tensor_shape()
1031  << std::endl);
1032 
1033  return std::move(func);
1034 }
1035 
1036 /** Create a backend generate proposals layer function
1037  *
1038  * @tparam GenerateProposalsLayerFunction Backend generate proposals function
1039  * @tparam TargetInfo Target-specific information
1040  *
1041  * @param[in] node Node to create the backend function for
1042  * @param[in] ctx Graph context
1043  *
1044  * @return Backend generate proposals layer function
1045  */
1046 template <typename GenerateProposalsLayerFunction, typename TargetInfo>
1048 {
1049  validate_node<TargetInfo>(node, 3 /* expected inputs */, 3 /* expected outputs */);
1050 
1051  // Extract IO and info
1052  typename TargetInfo::TensorType *scores = get_backing_tensor<TargetInfo>(node.input(0));
1053  typename TargetInfo::TensorType *deltas = get_backing_tensor<TargetInfo>(node.input(1));
1054  typename TargetInfo::TensorType *anchors = get_backing_tensor<TargetInfo>(node.input(2));
1055  typename TargetInfo::TensorType *proposals = get_backing_tensor<TargetInfo>(node.output(0));
1056  typename TargetInfo::TensorType *scores_out = get_backing_tensor<TargetInfo>(node.output(1));
1057  typename TargetInfo::TensorType *num_valid_proposals = get_backing_tensor<TargetInfo>(node.output(2));
1058  const GenerateProposalsInfo info = node.info();
1059 
1060  ARM_COMPUTE_ERROR_ON(scores == nullptr);
1061  ARM_COMPUTE_ERROR_ON(deltas == nullptr);
1062  ARM_COMPUTE_ERROR_ON(anchors == nullptr);
1063  ARM_COMPUTE_ERROR_ON(proposals == nullptr);
1064  ARM_COMPUTE_ERROR_ON(scores_out == nullptr);
1065 
1066  // Create and configure function
1067  auto func = std::make_unique<GenerateProposalsLayerFunction>(get_memory_manager(ctx, TargetInfo::TargetType));
1068  func->configure(scores, deltas, anchors, proposals, scores_out, num_valid_proposals, info);
1069 
1070  // Log info
1071  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.type()
1072  << " Target " << TargetInfo::TargetType
1073  << " Data Type: " << scores->info()->data_type()
1074  << " Scores shape: " << scores->info()->tensor_shape()
1075  << " Deltas shape: " << deltas->info()->tensor_shape()
1076  << " Anchors shape: " << anchors->info()->tensor_shape()
1077  << " Proposals shape: " << proposals->info()->tensor_shape()
1078  << " Num valid proposals shape: " << num_valid_proposals->info()->tensor_shape()
1079  << " Scores Out shape: " << scores_out->info()->tensor_shape()
1080  << std::endl);
1081 
1082  return std::move(func);
1083 }
1084 
1085 /** Create a backend l2 normalization layer function
1086  *
1087  * @tparam NormalizationLayerFunction Backend normalization function
1088  * @tparam TargetInfo Target-specific information
1089  *
1090  * @param[in] node Node to create the backend function for
1091  * @param[in] ctx Graph context
1092  *
1093  * @return Backend normalization layer function
1094  */
1095 template <typename L2NormalizeLayerFunction, typename TargetInfo>
1096 std::unique_ptr<IFunction> create_l2_normalize_layer(L2NormalizeLayerNode &node, GraphContext &ctx)
1097 {
1098  validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1099 
1100  // Extract IO and info
1101  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1102  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1103  int axis = node.axis();
1104  float epsilon = node.epsilon();
1105 
1106  ARM_COMPUTE_ERROR_ON(input == nullptr);
1107  ARM_COMPUTE_ERROR_ON(output == nullptr);
1108 
1109  // Create and configure function
1110  auto mm = get_memory_manager(ctx, TargetInfo::TargetType);
1111  auto func = std::make_unique<L2NormalizeLayerFunction>(mm);
1112  func->configure(input, output, axis, epsilon);
1113 
1114  // Log info
1115  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1116  << node.name()
1117  << " Type: " << node.type()
1118  << " Target: " << TargetInfo::TargetType
1119  << " Data Type: " << input->info()->data_type()
1120  << " Input shape: " << input->info()->tensor_shape()
1121  << " Output shape: " << output->info()->tensor_shape()
1122  << " Axis: " << axis
1123  << " Epsilon: " << epsilon
1124  << std::endl);
1125 
1126  return std::move(func);
1127 }
1128 
1129 /** Create a backend normalization layer function
1130  *
1131  * @tparam NormalizationLayerFunction Backend normalization function
1132  * @tparam TargetInfo Target-specific information
1133  *
1134  * @param[in] node Node to create the backend function for
1135  * @param[in] ctx Graph context
1136  *
1137  * @return Backend normalization layer function
1138  */
1139 template <typename NormalizationLayerFunction, typename TargetInfo>
1140 std::unique_ptr<IFunction> create_normalization_layer(NormalizationLayerNode &node, GraphContext &ctx)
1141 {
1142  ARM_COMPUTE_UNUSED(ctx);
1143 
1144  validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1145 
1146  // Extract IO and info
1147  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1148  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1149  const NormalizationLayerInfo norm_info = node.normalization_info();
1150  ARM_COMPUTE_ERROR_ON(input == nullptr);
1151  ARM_COMPUTE_ERROR_ON(output == nullptr);
1152 
1153  // Create and configure function
1154  auto func = std::make_unique<NormalizationLayerFunction>();
1155  func->configure(input, output, norm_info);
1156 
1157  // Log info
1158  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1159  << node.name()
1160  << " Type: " << node.type()
1161  << " Target: " << TargetInfo::TargetType
1162  << " Data Type: " << input->info()->data_type()
1163  << " Input shape: " << input->info()->tensor_shape()
1164  << " Output shape: " << output->info()->tensor_shape()
1165  << " Normalization info: " << norm_info.type()
1166  << std::endl);
1167 
1168  return std::move(func);
1169 }
1170 
1171 /** Create a backend normalize planar YUV layer function
1172  *
1173  * @tparam NormalizePlanarYUVLayerFunction Backend normalize planar YUV function
1174  * @tparam TargetInfo Target-specific information
1175  *
1176  * @param[in] node Node to create the backend function for
1177  *
1178  * @return Backend normalize plnar YUV layer function
1179  */
1180 template <typename NormalizePlanarYUVLayerFunction, typename TargetInfo>
1182 {
1183  validate_node<TargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);
1184 
1185  // Extract IO and info
1186  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1187  typename TargetInfo::TensorType *mean = get_backing_tensor<TargetInfo>(node.input(1));
1188  typename TargetInfo::TensorType *std = get_backing_tensor<TargetInfo>(node.input(2));
1189  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1190  ARM_COMPUTE_ERROR_ON(input == nullptr);
1191  ARM_COMPUTE_ERROR_ON(mean == nullptr);
1192  ARM_COMPUTE_ERROR_ON(std == nullptr);
1193  ARM_COMPUTE_ERROR_ON(output == nullptr);
1194 
1195  // Create and configure function
1196  auto func = std::make_unique<NormalizePlanarYUVLayerFunction>();
1197  func->configure(input, output, mean, std);
1198 
1199  // Log info
1200  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1201  << node.name()
1202  << " Type: " << node.type()
1203  << " Target: " << TargetInfo::TargetType
1204  << " Data Type: " << input->info()->data_type()
1205  << " Shape: " << input->info()->tensor_shape()
1206  << std::endl);
1207 
1208  return std::move(func);
1209 }
1210 
1211 /** Create a backend pad layer function
1212  *
1213  * @tparam PadLayerFunction Backend pad function
1214  * @tparam TargetInfo Target-specific information
1215  *
1216  * @param[in] node Node to create the backend function for
1217  *
1218  * @return Backend pad layer function
1219  */
1220 template <typename PadLayerFunction, typename TargetInfo>
1221 std::unique_ptr<IFunction> create_pad_layer(PadLayerNode &node)
1222 {
1223  validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1224 
1225  // Extract IO and info
1226  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1227  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1228  const PaddingList &padding = node.padding();
1229  const PixelValue pad_value = node.pad_value();
1230  ARM_COMPUTE_ERROR_ON(input == nullptr);
1231  ARM_COMPUTE_ERROR_ON(output == nullptr);
1232 
1233  // Create and configure function
1234  auto func = std::make_unique<PadLayerFunction>();
1235  func->configure(input, output, padding, pad_value);
1236 
1237  // Log info
1238  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1239  << node.name()
1240  << " Type: " << node.type()
1241  << " Target: " << TargetInfo::TargetType
1242  << " Data Type: " << input->info()->data_type()
1243  << " Input shape: " << input->info()->tensor_shape()
1244  << " Output shape: " << output->info()->tensor_shape()
1245  << std::endl);
1246 
1247  return std::move(func);
1248 }
1249 
1250 /** Create a backend permute layer function
1251  *
1252  * @tparam PermuteLayerFunction Backend permute function
1253  * @tparam TargetInfo Target-specific information
1254  *
1255  * @param[in] node Node to create the backend function for
1256  *
1257  * @return Backend permute layer function
1258  */
1259 template <typename PermuteLayerFunction, typename TargetInfo>
1260 std::unique_ptr<IFunction> create_permute_layer(PermuteLayerNode &node)
1261 {
1262  validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1263 
1264  // Extract IO and info
1265  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1266  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1267  const PermutationVector &perm = node.permutation_vector();
1268  ARM_COMPUTE_ERROR_ON(input == nullptr);
1269  ARM_COMPUTE_ERROR_ON(output == nullptr);
1270 
1271  // Create and configure function
1272  auto func = std::make_unique<PermuteLayerFunction>();
1273  func->configure(input, output, perm);
1274 
1275  // Log info
1276  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1277  << node.name()
1278  << " Type: " << node.type()
1279  << " Target: " << TargetInfo::TargetType
1280  << " Data Type: " << input->info()->data_type()
1281  << " Input shape: " << input->info()->tensor_shape()
1282  << " Output shape: " << output->info()->tensor_shape()
1283  << " Permutation vector: " << perm
1284  << std::endl);
1285 
1286  return std::move(func);
1287 }
1288 
1289 /** Create a backend pooling layer function
1290  *
1291  * @tparam PoolingLayerFunction Backend pooling function
1292  * @tparam TargetInfo Target-specific information
1293  *
1294  * @param[in] node Node to create the backend function for
1295  *
1296  * @return Backend pooling layer function
1297  */
1298 template <typename PoolingLayerFunction, typename TargetInfo>
1299 std::unique_ptr<IFunction> create_pooling_layer(PoolingLayerNode &node)
1300 {
1301  validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1302 
1303  // Extract IO and info
1304  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1305  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1306  const PoolingLayerInfo pool_info = node.pooling_info();
1307  ARM_COMPUTE_ERROR_ON(input == nullptr);
1308  ARM_COMPUTE_ERROR_ON(output == nullptr);
1309 
1310  // Create and configure function
1311  auto func = std::make_unique<PoolingLayerFunction>();
1312  func->configure(input, output, pool_info);
1313 
1314  // Log info
1315  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1316  << node.name()
1317  << " Type: " << node.type()
1318  << " Target: " << TargetInfo::TargetType
1319  << " Data Type: " << input->info()->data_type()
1320  << " Input shape: " << input->info()->tensor_shape()
1321  << " Output shape: " << output->info()->tensor_shape()
1322  << " Pooling info: " << pool_info.pool_type
1323  << std::endl);
1324 
1325  return std::move(func);
1326 }
1327 
1328 /** Create a backend PRelu layer function
1329  *
1330  * @tparam PReluFunction Backend PRelu function
1331  * @tparam TargetInfo Target-specific information
1332  *
1333  * @param[in] node Node to create the backend function for
1334  *
1335  * @return Backend PRelu layer function
1336  */
1337 template <typename PReluFunction, typename TargetInfo>
1338 std::unique_ptr<IFunction> create_prelu_layer(PReluLayerNode &node)
1339 {
1340  validate_node<TargetInfo>(node, 2 /* expected inputs */, 1 /* expected outputs */);
1341 
1342  // Extract IO and info
1343  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1344  typename TargetInfo::TensorType *alpha = get_backing_tensor<TargetInfo>(node.input(1));
1345  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1346  ARM_COMPUTE_ERROR_ON(input == nullptr || alpha == nullptr);
1347  ARM_COMPUTE_ERROR_ON(output == nullptr);
1348 
1349  // Create and configure function
1350  auto func = std::make_unique<PReluFunction>();
1351  func->configure(input, alpha, output);
1352 
1353  // Log info
1354  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1355  << node.name()
1356  << " Type: " << node.type()
1357  << " Target: " << TargetInfo::TargetType
1358  << " Data Type: " << input->info()->data_type()
1359  << " Input shape: " << input->info()->tensor_shape()
1360  << " Output shape: " << output->info()->tensor_shape()
1361  << std::endl);
1362 
1363  return std::move(func);
1364 }
1365 
1366 /** Create a backend print layer function
1367  *
1368  * @tparam TargetInfo Target-specific information
1369  *
1370  * @param[in] node Node to create the backend function for
1371  *
1372  * @return Backend print layer function
1373  */
1374 template <typename TargetInfo>
1375 std::unique_ptr<IFunction> create_print_layer(PrintLayerNode &node)
1376 {
1377  validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1378 
1379  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1380  ARM_COMPUTE_ERROR_ON(input == nullptr);
1381  ARM_COMPUTE_UNUSED(input);
1382 
1383  // Log info
1384  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1385  << node.name()
1386  << " Type: " << node.type()
1387  << " Target: " << TargetInfo::TargetType
1388  << " Data Type: " << input->info()->data_type()
1389  << " Input shape: " << input->info()->tensor_shape()
1390  << std::endl);
1391 
1392  return nullptr;
1393 }
1394 
1395 /** Create a backend priorbox layer function
1396  *
1397  * @tparam PriorBoxLayerFunction Backend priorbox function
1398  * @tparam TargetInfo Target-specific information
1399  *
1400  * @param[in] node Node to create the backend function for
1401  *
1402  * @return Backend priorbox layer function
1403  */
1404 template <typename PriorBoxLayerFunction, typename TargetInfo>
1405 std::unique_ptr<IFunction> create_priorbox_layer(PriorBoxLayerNode &node)
1406 {
1407  validate_node<TargetInfo>(node, 2 /* expected inputs */, 1 /* expected outputs */);
1408 
1409  // Extract IO and info
1410  typename TargetInfo::TensorType *input0 = get_backing_tensor<TargetInfo>(node.input(0));
1411  typename TargetInfo::TensorType *input1 = get_backing_tensor<TargetInfo>(node.input(1));
1412  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1413  const PriorBoxLayerInfo prior_info = node.priorbox_info();
1414  ARM_COMPUTE_ERROR_ON(input0 == nullptr);
1415  ARM_COMPUTE_ERROR_ON(input1 == nullptr);
1416  ARM_COMPUTE_ERROR_ON(output == nullptr);
1417 
1418  // Create and configure function
1419  auto func = std::make_unique<PriorBoxLayerFunction>();
1420  func->configure(input0, input1, output, prior_info);
1421 
1422  // Log info
1423  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1424  << node.name()
1425  << " Type: " << node.type()
1426  << " Target: " << TargetInfo::TargetType
1427  << " Data Type: " << input0->info()->data_type()
1428  << " Input0 shape: " << input0->info()->tensor_shape()
1429  << " Input1 shape: " << input1->info()->tensor_shape()
1430  << " Output shape: " << output->info()->tensor_shape()
1431  << " PriorBoxLayer info: " << prior_info
1432  << std::endl);
1433 
1434  return std::move(func);
1435 }
1436 
1437 /** Create a backend quantization layer function
1438  *
1439  * @tparam QuantizationLayerFunction Backend quantization function
1440  * @tparam TargetInfo Target-specific information
1441  *
1442  * @param[in] node Node to create the backend function for
1443  *
1444  * @return Backend quantization layer function
1445  */
1446 template <typename QuantizationLayerFunction, typename TargetInfo>
1447 std::unique_ptr<IFunction> create_quantization_layer(QuantizationLayerNode &node)
1448 {
1449  validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1450 
1451  // Extract IO and info
1452  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1453  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1454  ARM_COMPUTE_ERROR_ON(input == nullptr);
1455  ARM_COMPUTE_ERROR_ON(output == nullptr);
1456 
1457  // Create and configure function
1458  auto func = std::make_unique<QuantizationLayerFunction>();
1459  func->configure(input, output);
1460 
1461  // Log info
1462  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1463  << node.name()
1464  << " Type: " << node.type()
1465  << " Target: " << TargetInfo::TargetType
1466  << " Data Type: " << input->info()->data_type()
1467  << " Input shape: " << input->info()->tensor_shape()
1468  << " Output shape: " << output->info()->tensor_shape()
1469  << std::endl);
1470 
1471  return std::move(func);
1472 }
1473 
1474 /** Create a backend reduction operation layer function
1475  *
1476  * @tparam ReductionOperationFunction Backend reduction operation function
1477  * @tparam TargetInfo Target-specific information
1478  *
1479  * @param[in] node Node to create the backend function for
1480  * @param[in] ctx Graph context
1481  *
1482  * @return Backend reduction sum layer function
1483  */
1484 template <typename ReductionOperationFunction, typename TargetInfo>
1486 {
1487  validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1488 
1489  // Extract IO and info
1490  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1491  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1492  ReductionOperation op = node.op();
1493  int axis = node.axis();
1494  bool keep_dims = node.keep_dims();
1495  ARM_COMPUTE_ERROR_ON(input == nullptr);
1496  ARM_COMPUTE_ERROR_ON(output == nullptr);
1497 
1498  // Create and configure function
1499  auto func = std::make_unique<ReductionOperationFunction>(get_memory_manager(ctx, TargetInfo::TargetType));
1500  func->configure(input, output, axis, op, keep_dims);
1501 
1502  // Log info
1503  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1504  << node.name()
1505  << " Type: " << node.type()
1506  << " Target: " << TargetInfo::TargetType
1507  << " Data Type: " << input->info()->data_type()
1508  << " Input shape: " << input->info()->tensor_shape()
1509  << " Output shape: " << output->info()->tensor_shape()
1510  << " Operation: " << op
1511  << " Axis: " << axis
1512  << " Keep dimensions:" << keep_dims
1513  << std::endl);
1514 
1515  return std::move(func);
1516 }
1517 
/** Create a backend reorg layer function
 *
 * @tparam ReorgLayerFunction Backend reorg function
 * @tparam TargetInfo         Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend reorg layer function
 */
template <typename ReorgLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_reorg_layer(ReorgLayerNode &node)
{
    validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input  = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
    ARM_COMPUTE_ERROR_ON(input == nullptr);
    ARM_COMPUTE_ERROR_ON(output == nullptr);

    // Create and configure function
    auto func = std::make_unique<ReorgLayerFunction>();
    func->configure(input, output, node.stride());

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << std::endl);

    return std::move(func);
}
1554 
1555 /** Create a backend reshape layer function
1556  *
1557  * @tparam ReshapeLayerFunction Backend reshape function
1558  * @tparam TargetInfo Target-specific information
1559  *
1560  * @param[in] node Node to create the backend function for
1561  *
1562  * @return Backend reshape layer function
1563  */
1564 template <typename ReshapeLayerFunction, typename TargetInfo>
1565 std::unique_ptr<IFunction> create_reshape_layer(ReshapeLayerNode &node)
1566 {
1567  validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1568 
1569  // Extract IO and info
1570  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1571  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1572  ARM_COMPUTE_ERROR_ON(input == nullptr);
1573  ARM_COMPUTE_ERROR_ON(output == nullptr);
1574 
1575  // Create and configure function
1576  auto func = std::make_unique<ReshapeLayerFunction>();
1577  func->configure(input, output);
1578 
1579  // Log info
1580  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1581  << node.name()
1582  << " Type: " << node.type()
1583  << " Target: " << TargetInfo::TargetType
1584  << " Data Type: " << input->info()->data_type()
1585  << " Input shape: " << input->info()->tensor_shape()
1586  << " Output shape: " << output->info()->tensor_shape()
1587  << std::endl);
1588 
1589  return std::move(func);
1590 }
1591 
1592 /** Create a backend resize layer function
1593  *
1594  * @tparam ResizeLayerFunction Backend resize function
1595  * @tparam TargetInfo Target-specific information
1596  *
1597  * @param[in] node Node to create the backend function for
1598  *
1599  * @return Backend resize layer function
1600  */
1601 template <typename ResizeLayerFunction, typename TargetInfo>
1602 std::unique_ptr<IFunction> create_resize_layer(ResizeLayerNode &node)
1603 {
1604  validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1605 
1606  // Extract IO and info
1607  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1608  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1609  ARM_COMPUTE_ERROR_ON(input == nullptr);
1610  ARM_COMPUTE_ERROR_ON(output == nullptr);
1611  const InterpolationPolicy policy = node.policy();
1612 
1613  // Create and configure function
1614  auto func = std::make_unique<ResizeLayerFunction>();
1615  func->configure(input, output, ScaleKernelInfo{ policy, BorderMode::CONSTANT, PixelValue(), SamplingPolicy::CENTER, false, false });
1616 
1617  // Log info
1618  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1619  << node.name()
1620  << " Type: " << node.type()
1621  << " Target: " << TargetInfo::TargetType
1622  << " Data Type: " << input->info()->data_type()
1623  << " Input shape: " << input->info()->tensor_shape()
1624  << " Output shape: " << output->info()->tensor_shape()
1625  << " Interpolation: " << policy
1626  << std::endl);
1627 
1628  return std::move(func);
1629 }
1630 
1631 /** Create a backend ROI align layer function
1632  *
1633  * @tparam ROIAlignLayerFunction ROI Align function
1634  * @tparam TargetInfo Target-specific information
1635  *
1636  * @param[in] node Node to create the backend function for
1637  *
1638  * @return ROI Align layer function
1639  */
1640 template <typename ROIAlignLayerFunction, typename TargetInfo>
1641 std::unique_ptr<IFunction> create_roi_align_layer(ROIAlignLayerNode &node)
1642 {
1643  validate_node<TargetInfo>(node, 2 /* expected inputs */, 1 /* expected outputs */);
1644 
1645  // Extract IO and info
1646  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1647  typename TargetInfo::TensorType *rois = get_backing_tensor<TargetInfo>(node.input(1));
1648  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1649  ARM_COMPUTE_ERROR_ON(input == nullptr);
1650  ARM_COMPUTE_ERROR_ON(output == nullptr);
1651  ARM_COMPUTE_ERROR_ON(rois == nullptr);
1652 
1653  const ROIPoolingLayerInfo pool_info = node.pooling_info();
1654 
1655  // Create and configure function
1656  auto func = std::make_unique<ROIAlignLayerFunction>();
1657 
1658  func->configure(input, rois, output, pool_info);
1659 
1660  // Log info
1661  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1662  << node.name()
1663  << " Type: " << node.type()
1664  << " Target: " << TargetInfo::TargetType
1665  << " Data Type: " << input->info()->data_type()
1666  << " Input shape: " << input->info()->tensor_shape()
1667  << " Output shape: " << output->info()->tensor_shape()
1668  << " ROIs shape: " << rois->info()->tensor_shape()
1669  << " ROIPooling width: " << pool_info.pooled_width()
1670  << " ROIPooling height: " << pool_info.pooled_height()
1671  << std::endl);
1672 
1673  return std::move(func);
1674 }
1675 
1676 /** Create a backend slice layer function
1677  *
1678  * @tparam SliceLayerFunction Backend slice function
1679  * @tparam TargetInfo Target-specific information
1680  *
1681  * @param[in] node Node to create the backend function for
1682  *
1683  * @return Backend slice layer function
1684  */
1685 template <typename SliceLayerFunction, typename TargetInfo>
1686 std::unique_ptr<IFunction> create_slice_layer(SliceLayerNode &node)
1687 {
1688  validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1689 
1690  // Extract IO and info
1691  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1692  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1693  ARM_COMPUTE_ERROR_ON(input == nullptr);
1694  ARM_COMPUTE_ERROR_ON(output == nullptr);
1695 
1696  // Create and configure function
1697  auto func = std::make_unique<SliceLayerFunction>();
1698  func->configure(input, output, node.starts(), node.ends());
1699 
1700  // Log info
1701  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1702  << node.name()
1703  << " Type: " << node.type()
1704  << " Target: " << TargetInfo::TargetType
1705  << " Data Type: " << input->info()->data_type()
1706  << " Input shape: " << input->info()->tensor_shape()
1707  << " Output shape: " << output->info()->tensor_shape()
1708  << std::endl);
1709 
1710  return std::move(func);
1711 }
1712 
1713 /** Create a backend softmax layer function
1714  *
1715  * @tparam SoftmaxLayerFunction Backend softmax function
1716  * @tparam TargetInfo Target-specific information
1717  *
1718  * @param[in] node Node to create the backend function for
1719  * @param[in] ctx Graph context
1720  *
1721  * @return Backend softmax layer function
1722  */
1723 template <typename SoftmaxLayerFunction, typename TargetInfo>
1724 std::unique_ptr<IFunction> create_softmax_layer(SoftmaxLayerNode &node, GraphContext &ctx)
1725 {
1726  validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1727 
1728  // Extract IO and info
1729  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1730  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1731  const float beta = node.beta();
1732  ARM_COMPUTE_ERROR_ON(input == nullptr);
1733  ARM_COMPUTE_ERROR_ON(output == nullptr);
1734 
1735  // Create and configure function
1736  auto func = std::make_unique<SoftmaxLayerFunction>(get_memory_manager(ctx, TargetInfo::TargetType));
1737  func->configure(input, output, beta);
1738 
1739  // Log info
1740  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1741  << node.name()
1742  << " Type: " << node.type()
1743  << " Target: " << TargetInfo::TargetType
1744  << " Data Type: " << input->info()->data_type()
1745  << " Input shape: " << input->info()->tensor_shape()
1746  << " Output shape: " << output->info()->tensor_shape()
1747  << std::endl);
1748 
1749  return std::move(func);
1750 }
1751 
1752 /** Create a backend layer stack function
1753  *
1754  * @tparam StackLayerFunction Backend stack function
1755  * @tparam TargetInfo Target-specific information
1756  *
1757  * @param[in] node Node to create the backend function for
1758  *
1759  * @return Backend stack layer function
1760  */
1761 template <typename StackLayerFunction, typename TargetInfo>
1762 std::unique_ptr<arm_compute::IFunction> create_stack_layer(StackLayerNode &node)
1763 {
1764  ARM_COMPUTE_LOG_GRAPH_VERBOSE("Creating Stack node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
1765  ARM_COMPUTE_ERROR_ON(node.num_outputs() != 1);
1766 
1767  // Extract IO and info
1768  std::vector<typename TargetInfo::TensorType *> inputs;
1769  for(unsigned int i = 0; i < node.num_inputs(); ++i)
1770  {
1771  inputs.push_back(get_backing_tensor<TargetInfo>(node.input(i)));
1772  }
1773  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1774  const int axis = node.axis();
1775 
1776  // Create and configure function
1777  auto func = std::make_unique<StackLayerFunction>();
1778  func->configure(inputs, axis, output);
1779 
1780  // Log info
1781  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1782  << node.name()
1783  << " Type: " << node.type()
1784  << " Target: " << TargetInfo::TargetType
1785  << " Data Type: " << output->info()->data_type()
1786  << " Inputs shape: " << inputs[0]->info()->tensor_shape()
1787  << " Output shape: " << output->info()->tensor_shape()
1788  << " Num Inputs: " << inputs.size()
1789  << " Axis: " << axis
1790  << std::endl);
1791 
1792  return std::move(func);
1793 }
1794 
/** Create a backend strided slice layer function
 *
 * @tparam StridedSliceLayerFunction Backend strided slice function
 * @tparam TargetInfo                Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend strided slice layer function
 */
template <typename StridedSliceLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_strided_slice_layer(StridedSliceLayerNode &node)
{
    validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input  = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
    Coordinates                      starts  = node.starts();
    Coordinates                      ends    = node.ends();
    BiStrides                        strides = node.strides();
    StridedSliceLayerInfo            info    = node.strided_slice_info();

    ARM_COMPUTE_ERROR_ON(input == nullptr);
    ARM_COMPUTE_ERROR_ON(output == nullptr);

    // Create and configure function
    auto func = std::make_unique<StridedSliceLayerFunction>();
    func->configure(input, output, starts, ends, strides, info.begin_mask(), info.end_mask(), info.shrink_axis_mask());

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << std::endl);

    return std::move(func);
}
1836 } // namespace detail
1837 } // namespace backends
1838 } // namespace graph
1839 } // namespace arm_compute
1840 
1841 #endif /* ARM_COMPUTE_GRAPH_BACKENDS_DETAIL_FUNCTION_HELPERS_H */
std::unique_ptr< IFunction > create_normalization_layer(NormalizationLayerNode &node, GraphContext &ctx)
Create a backend normalization layer function.
std::unique_ptr< IFunction > create_dequantization_layer(DequantizationLayerNode &node)
Create a backend dequantize layer function.
std::string name() const
Returns node's name.
Definition: INode.cpp:107
Class describing the value of a pixel for any image format.
Definition: PixelValue.h:34
InterpolationPolicy
Interpolation method.
Definition: Types.h:392
std::unique_ptr< IFunction > create_arg_min_max_layer(ArgMinMaxLayerNode &node)
Creates a backend argminmax layer function.
Generate Proposals Information class.
Definition: Types.h:1352
EltwiseOperation
Supported Element-wise operations.
Definition: Types.h:103
std::unique_ptr< IFunction > create_slice_layer(SliceLayerNode &node)
Create a backend slice layer function.
std::unique_ptr< IFunction > create_eltwise_layer(EltwiseLayerNode &node)
Create a backend element-wise operation layer function.
bool enabled() const
Check if initialised.
Definition: Types.h:1600
std::vector< PaddingInfo > PaddingList
List of padding information.
Definition: Types.h:481
ReductionOperation
Available reduction operations.
Definition: Types.h:521
DataLayoutDimension concatenation_axis() const
Concatenation axis parameter accessor.
ITensorHandle * handle()
Backend tensor handle accessor.
Definition: Tensor.cpp:55
std::unique_ptr< IFunction > create_batch_normalization_layer(BatchNormalizationLayerNode &node)
Create a backend batch normalization layer function.
std::unique_ptr< IFunction > create_depth_to_space_layer(DepthToSpaceLayerNode &node)
Create a backend depth to space layer function.
#define ARM_COMPUTE_ERROR(msg)
Print the given message then throw an std::runtime_error.
Definition: Error.h:352
std::unique_ptr< IFunction > create_fused_convolution_batch_normalization_layer(FusedConvolutionBatchNormalizationNode &node, GraphContext &ctx)
Create a backend batch normalization layer function.
Target assigned_target() const
Returns assigned target for this node.
Definition: INode.cpp:198
TensorType
Memory type.
Definition: Types.h:38
std::unique_ptr< IFunction > create_prelu_layer(PReluLayerNode &node)
Create a backend PRelu layer function.
std::unique_ptr< IFunction > create_depthwise_convolution_layer(DepthwiseConvolutionLayerNode &node)
Create a backend layer depth-wise convolution function.
size_t num_outputs() const
Returns number of outputs of the node.
Definition: INode.cpp:183
std::unique_ptr< IFunction > create_resize_layer(ResizeLayerNode &node)
Create a backend resize layer function.
Wrapper function to first apply {NE, CL}BatchNormalizationLayer on the weights and then run {NE...
Normalization Layer Information class.
Definition: Types.h:1647
std::unique_ptr< IFunction > create_permute_layer(PermuteLayerNode &node)
Create a backend permute layer function.
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
Definition: Error.h:466
std::unique_ptr< IFunction > create_generate_proposals_layer(GenerateProposalsLayerNode &node, GraphContext &ctx)
Create a backend generate proposals layer function.
NodeType type() const override
Returns node's type.
std::unique_ptr< IFunction > create_l2_normalize_layer(L2NormalizeLayerNode &node, GraphContext &ctx)
Create a backend l2 normalization layer function.
const DataLayout data_layout
Definition: Im2Col.cpp:151
Fully connected layer info.
Definition: Types.h:1613
Fast math enabled for Convolution layer.
std::unique_ptr< IFunction > create_flatten_layer(FlattenLayerNode &node)
Create a backend flatten layer function.
unsigned int pooled_width() const
Get the pooled width of the layer.
Definition: Types.h:1324
Wrapper function to first apply {NE, CL}BatchNormalizationLayer on the weights and then run {NE...
std::unique_ptr< IFunction > create_pad_layer(PadLayerNode &node)
Create a backend pad layer function.
Activation Layer Information class.
Definition: Types.h:1550
#define ARM_COMPUTE_LOG_GRAPH_INFO(x)
Definition: Logger.h:54
std::unique_ptr< arm_compute::IFunction > create_stack_layer(StackLayerNode &node)
Create a backend layer stack function.
Copyright (c) 2017-2021 Arm Limited.
Samples are taken at pixel center.
Convolution Layer Weights Information class.
Definition: Types.h:1765
std::unique_ptr< IFunction > create_softmax_layer(SoftmaxLayerNode &node, GraphContext &ctx)
Create a backend softmax layer function.
1 channel, 1 S32 per channel
TensorDescriptor & desc()
TensorInfo metadata accessor.
Definition: Tensor.cpp:40
std::unique_ptr< IFunction > create_fused_depthwise_convolution_batch_normalization_layer(FusedDepthwiseConvolutionBatchNormalizationNode &node, GraphContext &ctx)
Create a backend fused depthwise convolution batch normalization layer function.
Node interface.
Definition: INode.h:45
std::unique_ptr< IFunction > create_convolution_layer(ConvolutionLayerNode &node, GraphContext &ctx)
Create a backend convolution layer function.
#define ARM_COMPUTE_UNUSED(...)
To avoid unused variables warnings.
Definition: Error.h:152
std::unique_ptr< IFunction > create_roi_align_layer(ROIAlignLayerNode &node)
Create a backend ROI align layer function.
bool is_enabled() const
Enabled parameter accessor.
Tensor * output(size_t idx) const
Returns the tensor of a given output of the node.
Definition: INode.cpp:158
#define ARM_COMPUTE_ERROR_ON_MSG(cond, msg)
Definition: Error.h:456
const unsigned int num_groups
Definition: Im2Col.cpp:153
Coordinates of an item.
Definition: Coordinates.h:37
Pooling Layer Information struct.
Definition: Types.h:1214
void validate_node(const INode &node, size_t num_expected_inputs, size_t num_expected_outputs)
std::unique_ptr< IFunction > create_reduction_operation_layer(ReductionLayerNode &node, GraphContext &ctx)
Create a backend reduction operation layer function.
std::unique_ptr< IFunction > create_priorbox_layer(PriorBoxLayerNode &node)
Create a backend priorbox layer function.
NodeID id() const
Returns node's ID.
Definition: INode.cpp:102
PriorBox layer info.
Definition: Types.h:839
std::unique_ptr< IFunction > create_detection_output_layer(DetectionOutputLayerNode &node)
Create a backend detection output layer function.
Padding and stride information class.
Definition: Types.h:722
Bounding Box Transform information class.
Definition: Types.h:1483
std::unique_ptr< IFunction > create_detection_post_process_layer(DetectionPostProcessLayerNode &node)
Create a backend detection post process layer function.
Tensor handle interface object.
Definition: ITensorHandle.h:38
std::unique_ptr< IFunction > create_deconvolution_layer(DeconvolutionLayerNode &node, GraphContext &ctx)
Create a backend deconvolution layer function.
std::unique_ptr< IFunction > create_unary_eltwise_layer(UnaryEltwiseLayerNode &node)
Create a backend unary element-wise operation layer function.
bool is_data_type_quantized_asymmetric(DataType dt)
Check if a given data type is of asymmetric quantized type.
Definition: Utils.h:1190
bool is_in_place_operation(void *input, void *output)
Checks if an operation is in place.
Definition: Utils.h:77
Strides of an item in bytes.
Definition: Strides.h:37
std::unique_ptr< IFunction > create_print_layer(PrintLayerNode &node)
Create a backend print layer function.
FloorUKernelPtr func
Detection Output layer info.
Definition: Types.h:976
DetectionPostProcess Layer node.
ScaleKernelInfo info(interpolation_policy, default_border_mode, PixelValue(), sampling_policy, false)
std::unique_ptr< IFunction > create_fully_connected_layer(FullyConnectedLayerNode &node, GraphContext &ctx)
Create a backend fully connected layer function.
std::unique_ptr< IFunction > create_activation_layer(ActivationLayerNode &node)
Creates a backend activation layer function.
int axis() const
Stack axis parameter accessor.
int32_t shrink_axis_mask() const
Definition: Types.h:1753
std::unique_ptr< IFunction > create_strided_slice_layer(StridedSliceLayerNode &node)
Create a backend slice layer function.
unsigned int pooled_height() const
Get the pooled height of the layer.
Definition: Types.h:1329
ROI Pooling Layer Information class.
Definition: Types.h:1309
Class for specifying the size of an image or rectangle.
Definition: Size2D.h:34
std::unique_ptr< IFunction > create_quantization_layer(QuantizationLayerNode &node)
Create a backend quantization layer function.
std::string to_string(const ICLTensor &arg)
std::shared_ptr< IMemoryManager > get_memory_manager(GraphContext &ctx, Target target)
Returns the memory manager for a given target.
Definition: Utils.h:89
std::unique_ptr< IFunction > create_normalize_planar_yuv_layer(NormalizePlanarYUVLayerNode &node)
Create a backend normalize planar YUV layer function.
#define ARM_COMPUTE_LOG_GRAPH_VERBOSE(x)
Definition: Logger.h:50
ConvolutionMethod
Supported Convolution layer methods.
Definition: Types.h:118
std::unique_ptr< IFunction > create_bounding_box_transform_layer(BoundingBoxTransformLayerNode &node)
Create a backend bounding box transform layer function.
Detection Output layer info.
Definition: Types.h:1095
size_t num_inputs() const
Returns number of inputs of the node.
Definition: INode.cpp:178
TargetInfo::TensorType * get_backing_tensor(arm_compute::graph::Tensor *tensor)
Returns backing tensor of a given tensor.
ActivationFunction activation() const
Get the type of activation function.
Definition: Types.h:1585
virtual NodeType type() const =0
Returns node's type.
std::shared_ptr< IWeightsManager > get_weights_manager(GraphContext &ctx, Target target)
Returns the weights manager for a given target.
Definition: Utils.h:102
Tensor * input(size_t idx) const
Returns the tensor of a given input of the node.
Definition: INode.cpp:150
NodeType type() const override
Returns node's type.
std::unique_ptr< IFunction > create_pooling_layer(PoolingLayerNode &node)
Create a backend pooling layer function.
DataLayout
[DataLayout enum definition]
Definition: Types.h:120
std::unique_ptr< arm_compute::IFunction > create_concatenate_layer(ConcatenateLayerNode &node)
Create a backend layer concatenate function.
ConvertPolicy
Policy to handle overflow.
Definition: Types.h:385
std::unique_ptr< IFunction > create_reshape_layer(ReshapeLayerNode &node)
Create a backend reshape layer function.
size_t get_dimension_idx(DataLayout data_layout, const DataLayoutDimension data_layout_dimension)
Get index of a tensor's given dimension depending on its layout.
Definition: Utils.cpp:129
std::unique_ptr< IFunction > create_reorg_layer(ReorgLayerNode &node)
Create a backend reorg layer function.
Tensor object.
Definition: Tensor.h:41
std::unique_ptr< IFunction > create_channel_shuffle_layer(ChannelShuffleLayerNode &node)
Create a backend channel shuffle layer function.
UnaryEltwiseOperation
Supported Unary Element-wise operations.
Definition: Types.h:112