Compute Library
 21.05
arm_compute::graph::backends::detail Namespace Reference

Data Structures

class  BackendRegistrar
 Helper class to statically register a backend. More...
 

Functions

template<typename TargetInfo >
TargetInfo::TensorType * get_backing_tensor (arm_compute::graph::Tensor *tensor)
 Returns backing tensor of a given tensor. More...
 
template<typename TargetInfo >
void validate_node (const INode &node, size_t num_expected_inputs, size_t num_expected_outputs)
 
template<typename ActivationLayerFunction , typename TargetInfo >
std::unique_ptr< IFunction > create_activation_layer (ActivationLayerNode &node)
 Creates a backend activation layer function. More...
 
template<typename ArgMinMaxLayerFunction , typename TargetInfo >
std::unique_ptr< IFunction > create_arg_min_max_layer (ArgMinMaxLayerNode &node)
 Creates a backend argminmax layer function. More...
 
template<typename BatchNormalizationLayerFunction , typename TargetInfo >
std::unique_ptr< IFunction > create_batch_normalization_layer (BatchNormalizationLayerNode &node)
 Create a backend batch normalization layer function. More...
 
template<typename FusedLayerTypes , typename TargetInfo >
std::unique_ptr< IFunction > create_fused_convolution_batch_normalization_layer (FusedConvolutionBatchNormalizationNode &node, GraphContext &ctx)
 Create a backend fused convolution batch normalization layer function. More...
 
template<typename FusedLayerTypes , typename TargetInfo >
std::unique_ptr< IFunction > create_fused_depthwise_convolution_batch_normalization_layer (FusedDepthwiseConvolutionBatchNormalizationNode &node, GraphContext &ctx)
 Create a backend fused depthwise convolution batch normalization layer function. More...
 
template<typename BoundingBoxTransformLayerFunction , typename TargetInfo >
std::unique_ptr< IFunction > create_bounding_box_transform_layer (BoundingBoxTransformLayerNode &node)
 Create a backend bounding box transform layer function. More...
 
template<typename ChannelShuffleLayerFunction , typename TargetInfo >
std::unique_ptr< IFunction > create_channel_shuffle_layer (ChannelShuffleLayerNode &node)
 Create a backend channel shuffle layer function. More...
 
template<typename ConcatenateLayerFunction , typename TargetInfo >
std::unique_ptr< arm_compute::IFunction > create_concatenate_layer (ConcatenateLayerNode &node)
 Create a backend layer concatenate function. More...
 
template<typename ConvolutionLayerFunctions , typename TargetInfo >
std::unique_ptr< IFunction > create_convolution_layer (ConvolutionLayerNode &node, GraphContext &ctx)
 Create a backend convolution layer function. More...
 
template<typename DeconvolutionLayerFunction , typename TargetInfo >
std::unique_ptr< IFunction > create_deconvolution_layer (DeconvolutionLayerNode &node, GraphContext &ctx)
 Create a backend deconvolution layer function. More...
 
template<typename DepthwiseConvolutionLayer , typename TargetInfo >
std::unique_ptr< IFunction > create_depthwise_convolution_layer (DepthwiseConvolutionLayerNode &node)
 Create a backend layer depth-wise convolution function. More...
 
template<typename DepthToSpaceLayerFunction , typename TargetInfo >
std::unique_ptr< IFunction > create_depth_to_space_layer (DepthToSpaceLayerNode &node)
 Create a backend depth to space layer function. More...
 
template<typename DequantizationLayerFunction , typename TargetInfo >
std::unique_ptr< IFunction > create_dequantization_layer (DequantizationLayerNode &node)
 Create a backend dequantize layer function. More...
 
template<typename DetectionOutputLayerFunction , typename TargetInfo >
std::unique_ptr< IFunction > create_detection_output_layer (DetectionOutputLayerNode &node)
 Create a backend detection output layer function. More...
 
template<typename DetectionPostProcessLayerFunction , typename TargetInfo >
std::unique_ptr< IFunction > create_detection_post_process_layer (DetectionPostProcessLayerNode &node)
 Create a backend detection post process layer function. More...
 
template<typename EltwiseFunctions , typename TargetInfo >
std::unique_ptr< IFunction > create_eltwise_layer (EltwiseLayerNode &node)
 Create a backend element-wise operation layer function. More...
 
template<typename UnaryEltwiseFunctions , typename TargetInfo >
std::unique_ptr< IFunction > create_unary_eltwise_layer (UnaryEltwiseLayerNode &node)
 Create a backend unary element-wise operation layer function. More...
 
template<typename FlattenLayerFunction , typename TargetInfo >
std::unique_ptr< IFunction > create_flatten_layer (FlattenLayerNode &node)
 Create a backend flatten layer function. More...
 
template<typename FullyConnectedLayerFunction , typename TargetInfo >
std::unique_ptr< IFunction > create_fully_connected_layer (FullyConnectedLayerNode &node, GraphContext &ctx)
 Create a backend fully connected layer function. More...
 
template<typename GenerateProposalsLayerFunction , typename TargetInfo >
std::unique_ptr< IFunction > create_generate_proposals_layer (GenerateProposalsLayerNode &node, GraphContext &ctx)
 Create a backend generate proposals layer function. More...
 
template<typename L2NormalizeLayerFunction , typename TargetInfo >
std::unique_ptr< IFunction > create_l2_normalize_layer (L2NormalizeLayerNode &node, GraphContext &ctx)
 Create a backend l2 normalization layer function. More...
 
template<typename NormalizationLayerFunction , typename TargetInfo >
std::unique_ptr< IFunction > create_normalization_layer (NormalizationLayerNode &node, GraphContext &ctx)
 Create a backend normalization layer function. More...
 
template<typename NormalizePlanarYUVLayerFunction , typename TargetInfo >
std::unique_ptr< IFunction > create_normalize_planar_yuv_layer (NormalizePlanarYUVLayerNode &node)
 Create a backend normalize planar YUV layer function. More...
 
template<typename PadLayerFunction , typename TargetInfo >
std::unique_ptr< IFunction > create_pad_layer (PadLayerNode &node)
 Create a backend pad layer function. More...
 
template<typename PermuteLayerFunction , typename TargetInfo >
std::unique_ptr< IFunction > create_permute_layer (PermuteLayerNode &node)
 Create a backend permute layer function. More...
 
template<typename PoolingLayerFunction , typename TargetInfo >
std::unique_ptr< IFunction > create_pooling_layer (PoolingLayerNode &node)
 Create a backend pooling layer function. More...
 
template<typename PReluFunction , typename TargetInfo >
std::unique_ptr< IFunction > create_prelu_layer (PReluLayerNode &node)
 Create a backend PRelu layer function. More...
 
template<typename TargetInfo >
std::unique_ptr< IFunction > create_print_layer (PrintLayerNode &node)
 Create a backend print layer function. More...
 
template<typename PriorBoxLayerFunction , typename TargetInfo >
std::unique_ptr< IFunction > create_priorbox_layer (PriorBoxLayerNode &node)
 Create a backend priorbox layer function. More...
 
template<typename QuantizationLayerFunction , typename TargetInfo >
std::unique_ptr< IFunction > create_quantization_layer (QuantizationLayerNode &node)
 Create a backend quantization layer function. More...
 
template<typename ReductionOperationFunction , typename TargetInfo >
std::unique_ptr< IFunction > create_reduction_operation_layer (ReductionLayerNode &node, GraphContext &ctx)
 Create a backend reduction operation layer function. More...
 
template<typename ReorgLayerFunction , typename TargetInfo >
std::unique_ptr< IFunction > create_reorg_layer (ReorgLayerNode &node)
 Create a backend reorg layer function. More...
 
template<typename ReshapeLayerFunction , typename TargetInfo >
std::unique_ptr< IFunction > create_reshape_layer (ReshapeLayerNode &node)
 Create a backend reshape layer function. More...
 
template<typename ResizeLayerFunction , typename TargetInfo >
std::unique_ptr< IFunction > create_resize_layer (ResizeLayerNode &node)
 Create a backend resize layer function. More...
 
template<typename ROIAlignLayerFunction , typename TargetInfo >
std::unique_ptr< IFunction > create_roi_align_layer (ROIAlignLayerNode &node)
 Create a backend ROI align layer function. More...
 
template<typename SliceLayerFunction , typename TargetInfo >
std::unique_ptr< IFunction > create_slice_layer (SliceLayerNode &node)
 Create a backend slice layer function. More...
 
template<typename SoftmaxLayerFunction , typename TargetInfo >
std::unique_ptr< IFunction > create_softmax_layer (SoftmaxLayerNode &node, GraphContext &ctx)
 Create a backend softmax layer function. More...
 
template<typename StackLayerFunction , typename TargetInfo >
std::unique_ptr< arm_compute::IFunction > create_stack_layer (StackLayerNode &node)
 Create a backend layer stack function. More...
 
template<typename StridedSliceLayerFunction , typename TargetInfo >
std::unique_ptr< IFunction > create_strided_slice_layer (StridedSliceLayerNode &node)
 Create a backend strided slice layer function. More...
 
arm_compute::ITensorInfo * get_backing_tensor_info (arm_compute::graph::Tensor *tensor)
 Returns backing tensor info of a given tensor. More...
 
template<typename ArgMinMaxLayer >
Status validate_arg_min_max_layer (ArgMinMaxLayerNode &node)
 Validates a ArgMinMax layer node. More...
 
template<typename BoundingBoxTransformLayer >
Status validate_bounding_box_transform_layer (BoundingBoxTransformLayerNode &node)
 Validates a Bounding Box Transform layer node. More...
 
template<typename ChannelShuffleLayer >
Status validate_channel_shuffle_layer (ChannelShuffleLayerNode &node)
 Validates a Channel Shuffle layer node. More...
 
template<typename ConvolutionLayer , typename DirectConvolutionLayer , typename GEMMConvolutionLayer , typename WinogradConvolutionLayer >
Status validate_convolution_layer (ConvolutionLayerNode &node)
 Validates a Convolution layer node. More...
 
template<typename DepthwiseConvolutionLayer >
Status validate_depthwise_convolution_layer (DepthwiseConvolutionLayerNode &node)
 Validates a Depthwise Convolution layer node. More...
 
template<typename DepthToSpaceLayer >
Status validate_depth_to_space_layer (DepthToSpaceLayerNode &node)
 Validates a depth to space layer node. More...
 
template<typename DequantizationLayer >
Status validate_dequantization_layer (DequantizationLayerNode &node)
 Validates a dequantize layer node. More...
 
template<typename DetectionOutputLayer >
Status validate_detection_output_layer (DetectionOutputLayerNode &node)
 Validates a detection output layer node. More...
 
template<typename DetectionPostProcessLayer >
Status validate_detection_post_process_layer (DetectionPostProcessLayerNode &node)
 Validates a detection post process layer node. More...
 
template<typename GenerateProposalsLayer >
Status validate_generate_proposals_layer (GenerateProposalsLayerNode &node)
 Validates a Generate Proposals layer node. More...
 
template<typename L2NormalizeLayer >
Status validate_l2_normalize_layer (L2NormalizeLayerNode &node)
 Validates a L2Normalization layer node. More...
 
template<typename NormalizePlanarYUVLayer >
Status validate_normalize_planar_yuv_layer (NormalizePlanarYUVLayerNode &node)
 Validates a NormalizePlanarYUV layer node. More...
 
template<typename PadLayer >
Status validate_pad_layer (PadLayerNode &node)
 Validates a pad layer node. More...
 
template<typename PermuteLayer >
Status validate_permute_layer (PermuteLayerNode &node)
 Validates a permute layer node. More...
 
template<typename PReluLayer >
Status validate_prelu_layer (PReluLayerNode &node)
 Validates a PRelu layer node. More...
 
template<typename PriorBoxLayer >
Status validate_priorbox_layer (PriorBoxLayerNode &node)
 Validates a priorbox layer node. More...
 
template<typename QuantizationLayer >
Status validate_quantization_layer (QuantizationLayerNode &node)
 Validates a Quantization layer node. More...
 
template<typename ReductionLayer >
Status validate_reduction_operation_layer (ReductionLayerNode &node)
 Validates a Reduction operation layer node. More...
 
template<typename ReorgLayer >
Status validate_reorg_layer (ReorgLayerNode &node)
 Validates a Reorg layer node. More...
 
template<typename ReshapeLayer >
Status validate_reshape_layer (ReshapeLayerNode &node)
 Validates a Reshape layer node. More...
 
template<typename ROIAlignLayer >
Status validate_roi_align_layer (ROIAlignLayerNode &node)
 Validates a ROI Align layer node. More...
 
template<typename SliceLayer >
Status validate_slice_layer (SliceLayerNode &node)
 Validates a Slice layer node. More...
 
template<typename StridedSliceLayer >
Status validate_strided_slice_layer (StridedSliceLayerNode &node)
 Validates a Strided Slice layer node. More...
 
template<typename EltwiseLayerFunctions >
Status validate_eltwise_Layer (EltwiseLayerNode &node)
 Validates a element-wise layer node. More...
 
template<typename UnaryEltwiseLayerFunctions >
Status validate_unary_eltwise_layer (UnaryEltwiseLayerNode &node)
 Validates a unary element-wise layer node. More...
 
template<>
std::unique_ptr< IFunction > create_detection_output_layer< CPPDetectionOutputLayer, CLTargetInfo > (DetectionOutputLayerNode &node)
 
template<>
std::unique_ptr< IFunction > create_detection_post_process_layer< CPPDetectionPostProcessLayer, CLTargetInfo > (DetectionPostProcessLayerNode &node)
 
template<>
std::unique_ptr< IFunction > create_normalization_layer< NENormalizationLayer, NETargetInfo > (NormalizationLayerNode &node, GraphContext &ctx)
 

Function Documentation

◆ create_activation_layer()

std::unique_ptr<IFunction> arm_compute::graph::backends::detail::create_activation_layer ( ActivationLayerNode &node)

Creates a backend activation layer function.

Template Parameters
ActivationLayerFunction: Backend activation function
TargetInfo: Target-specific information
Parameters
[in] node: Node to create the backend function for
Returns
Backend activation layer function

Definition at line 99 of file FunctionHelpers.h.

100 {
101  validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
102 
103  // Extract IO and info
104  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
105  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
106  const ActivationLayerInfo act_info = node.activation_info();
107 
108  // Create function
109  auto func = std::make_unique<ActivationLayerFunction>();
110  func->configure(input, output, act_info);
111 
112  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
113  << node.name()
114  << " Type: " << node.type()
115  << " Target: " << TargetInfo::TargetType
116  << " Data Type: " << input->info()->data_type()
117  << " Shape: " << input->info()->tensor_shape()
118  << " Activation function: " << act_info.activation()
119  << " a: " << act_info.a()
120  << " b: " << act_info.b()
121  << " InPlace : " << is_in_place_operation(input, output)
122  << std::endl);
123 
124  return std::move(func);
125 }
TensorType
Memory type.
Definition: Types.h:38
#define ARM_COMPUTE_LOG_GRAPH_INFO(x)
Definition: Logger.h:54
bool is_in_place_operation(void *input, void *output)
Checks if an operation is in place.
Definition: Utils.h:77
FloorUKernelPtr func

References ActivationLayerNode::activation_info(), ARM_COMPUTE_LOG_GRAPH_INFO, func, INode::input(), arm_compute::test::validation::input, arm_compute::graph::backends::is_in_place_operation(), INode::name(), INode::output(), and ActivationLayerNode::type().

◆ create_arg_min_max_layer()

std::unique_ptr<IFunction> arm_compute::graph::backends::detail::create_arg_min_max_layer ( ArgMinMaxLayerNode &node)

Creates a backend argminmax layer function.

Template Parameters
ArgMinMaxLayerFunction: Backend arg min max function
TargetInfo: Target-specific information
Parameters
[in] node: Node to create the backend function for
Returns
Backend argminmax layer function

Definition at line 137 of file FunctionHelpers.h.

138 {
139  validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
140 
141  // Extract IO and info
142  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
143  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
144  const ReductionOperation op = node.reduction_operation();
145  unsigned int axis = node.axis();
146 
147  // Create function
148  auto func = std::make_unique<ArgMinMaxLayerFunction>();
149  func->configure(input, axis, output, op);
150 
151  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
152  << node.name()
153  << " Type: " << node.type()
154  << " Target: " << TargetInfo::TargetType
155  << " Data Type: " << input->info()->data_type()
156  << " Shape: " << input->info()->tensor_shape()
157  << " Reduction Operation: " << op
158  << " axis: " << axis
159  << std::endl);
160 
161  return std::move(func);
162 }
ReductionOperation
Available reduction operations.
Definition: Types.h:457
TensorType
Memory type.
Definition: Types.h:38
#define ARM_COMPUTE_LOG_GRAPH_INFO(x)
Definition: Logger.h:54
FloorUKernelPtr func

References ARM_COMPUTE_LOG_GRAPH_INFO, ArgMinMaxLayerNode::axis(), func, INode::input(), arm_compute::test::validation::input, INode::name(), INode::output(), ArgMinMaxLayerNode::reduction_operation(), and ArgMinMaxLayerNode::type().

◆ create_batch_normalization_layer()

std::unique_ptr<IFunction> arm_compute::graph::backends::detail::create_batch_normalization_layer ( BatchNormalizationLayerNode &node)

Create a backend batch normalization layer function.

Template Parameters
BatchNormalizationLayerFunction: Backend batch normalization function
TargetInfo: Target-specific information
Parameters
[in] node: Node to create the backend function for
Returns
Backend batch normalization layer function

Definition at line 174 of file FunctionHelpers.h.

175 {
176  validate_node<TargetInfo>(node, 5 /* expected inputs */, 1 /* expected outputs */);
177 
178  // Extract IO and info
179  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
180  typename TargetInfo::TensorType *mean = get_backing_tensor<TargetInfo>(node.input(1));
181  typename TargetInfo::TensorType *var = get_backing_tensor<TargetInfo>(node.input(2));
182  typename TargetInfo::TensorType *beta = get_backing_tensor<TargetInfo>(node.input(3));
183  typename TargetInfo::TensorType *gamma = get_backing_tensor<TargetInfo>(node.input(4));
184 
185  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
186  const float epsilon = node.epsilon();
187  const ActivationLayerInfo fused_act = node.fused_activation();
188 
189  // Create and configure function
190  auto func = std::make_unique<BatchNormalizationLayerFunction>();
191  func->configure(input, output, mean, var, beta, gamma, epsilon, fused_act);
192 
193  // Log info
194  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
195  << node.name()
196  << " Type: " << node.type()
197  << " Target: " << TargetInfo::TargetType
198  << " Data Type: " << input->info()->data_type()
199  << " Shape: " << input->info()->tensor_shape()
200  << " Epsilon: " << epsilon << " "
201  << (fused_act.enabled() ? to_string(fused_act.activation()) : "")
202  << " InPlace: " << is_in_place_operation(input, output)
203  << std::endl);
204 
205  return std::move(func);
206 }
TensorType
Memory type.
Definition: Types.h:38
std::string to_string(const GEMMConfigNative &config)
Definition: Utils.cpp:156
#define ARM_COMPUTE_LOG_GRAPH_INFO(x)
Definition: Logger.h:54
bool is_in_place_operation(void *input, void *output)
Checks if an operation is in place.
Definition: Utils.h:77
FloorUKernelPtr func

References ActivationLayerInfo::activation(), ARM_COMPUTE_LOG_GRAPH_INFO, ActivationLayerInfo::enabled(), arm_compute::quantization::epsilon, BatchNormalizationLayerNode::epsilon(), func, BatchNormalizationLayerNode::fused_activation(), INode::input(), arm_compute::test::validation::input, arm_compute::graph::backends::is_in_place_operation(), INode::name(), INode::output(), arm_compute::to_string(), and BatchNormalizationLayerNode::type().

◆ create_bounding_box_transform_layer()

std::unique_ptr<IFunction> arm_compute::graph::backends::detail::create_bounding_box_transform_layer ( BoundingBoxTransformLayerNode &node)

Create a backend bounding box transform layer function.

Template Parameters
BoundingBoxTransformLayerFunction: Backend bounding box transform function
TargetInfo: Target-specific information
Parameters
[in] node: Node to create the backend function for
Returns
Backend bounding box transform layer function

Definition at line 331 of file FunctionHelpers.h.

332 {
333  validate_node<TargetInfo>(node, 2 /* expected inputs */, 1 /* expected outputs */);
334 
335  // Extract IO and info
336  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
337  typename TargetInfo::TensorType *deltas = get_backing_tensor<TargetInfo>(node.input(1));
338  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
339  const BoundingBoxTransformInfo bbox_info = node.info();
340 
341  // Create and configure function
342  auto func = std::make_unique<BoundingBoxTransformLayerFunction>();
343  func->configure(input, output, deltas, bbox_info);
344 
345  // Log info
346  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
347  << node.name()
348  << " Type: " << node.type()
349  << " Target: " << TargetInfo::TargetType
350  << " Data Type: " << input->info()->data_type()
351  << " Shape: " << input->info()->tensor_shape()
352  << " BoundingBox Info img W: " << bbox_info.img_width() << " "
353  << " BoundingBox Info img H: " << bbox_info.img_height() << " "
354  << std::endl);
355 
356  return std::move(func);
357 }
TensorType
Memory type.
Definition: Types.h:38
#define ARM_COMPUTE_LOG_GRAPH_INFO(x)
Definition: Logger.h:54
FloorUKernelPtr func

References ARM_COMPUTE_LOG_GRAPH_INFO, func, BoundingBoxTransformLayerNode::info(), INode::input(), arm_compute::test::validation::input, INode::name(), INode::output(), and BoundingBoxTransformLayerNode::type().

◆ create_channel_shuffle_layer()

std::unique_ptr<IFunction> arm_compute::graph::backends::detail::create_channel_shuffle_layer ( ChannelShuffleLayerNode &node)

Create a backend channel shuffle layer function.

Template Parameters
ChannelShuffleLayerFunction: Backend channel shuffle function
TargetInfo: Target-specific information
Parameters
[in] node: Node to create the backend function for
Returns
Backend channel shuffle layer function

Definition at line 369 of file FunctionHelpers.h.

370 {
371  validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
372 
373  // Extract IO and info
374  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
375  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
376  const unsigned int num_groups = node.num_groups();
377 
378  // Create function
379  auto func = std::make_unique<ChannelShuffleLayerFunction>();
380  func->configure(input, output, num_groups);
381 
382  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
383  << node.name()
384  << " Type: " << node.type()
385  << " Target: " << TargetInfo::TargetType
386  << " Data Type: " << input->info()->data_type()
387  << " Shape: " << input->info()->tensor_shape()
388  << " Num groups: " << num_groups
389  << std::endl);
390 
391  return std::move(func);
392 }
TensorType
Memory type.
Definition: Types.h:38
#define ARM_COMPUTE_LOG_GRAPH_INFO(x)
Definition: Logger.h:54
const unsigned int num_groups
Definition: Im2Col.cpp:153
FloorUKernelPtr func

References ARM_COMPUTE_LOG_GRAPH_INFO, func, INode::input(), arm_compute::test::validation::input, INode::name(), ChannelShuffleLayerNode::num_groups(), arm_compute::test::validation::num_groups, INode::output(), and ChannelShuffleLayerNode::type().

◆ create_concatenate_layer()

std::unique_ptr<arm_compute::IFunction> arm_compute::graph::backends::detail::create_concatenate_layer ( ConcatenateLayerNode &node)

Create a backend layer concatenate function.

Template Parameters
ConcatenateLayerFunction: Backend concatenate function
TargetInfo: Target-specific information
Parameters
[in] node: Node to create the backend function for
Returns
Backend concatenate layer function

Definition at line 404 of file FunctionHelpers.h.

405 {
406  ARM_COMPUTE_LOG_GRAPH_VERBOSE("Creating Concatenate node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
407  ARM_COMPUTE_ERROR_ON(node.num_outputs() != 1);
408 
409  // Return nullptr if depth concatenate is switched off
410  if(!node.is_enabled())
411  {
412  return nullptr;
413  }
414 
415  // Extract IO and info
416  std::vector<typename TargetInfo::SrcTensorType *> inputs;
417  for(unsigned int i = 0; i < node.num_inputs(); ++i)
418  {
419  inputs.push_back(get_backing_tensor<TargetInfo>(node.input(i)));
420  }
421  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
422  const DataLayout data_layout = node.output(0) != nullptr ? node.output(0)->desc().layout : DataLayout::UNKNOWN;
423  const size_t concat_axis = get_dimension_idx(data_layout, node.concatenation_axis());
424 
425  // Create and configure function
426  auto func = std::make_unique<ConcatenateLayerFunction>();
427  func->configure(inputs, output, concat_axis);
428 
429  // Log info
430  const bool is_quantized = is_data_type_quantized_asymmetric(output->info()->data_type());
431  std::ostringstream qss;
432  if(is_quantized)
433  {
434  qss << " Output QuantInfo: " << output->info()->quantization_info();
435  }
436  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
437  << node.name()
438  << " Type: " << node.type()
439  << " Target: " << TargetInfo::TargetType
440  << " Data Type: " << output->info()->data_type()
441  << " Shape: " << output->info()->tensor_shape()
442  << " Num Inputs: " << inputs.size()
443  << " Axis: " << concat_axis
444  << qss.str()
445  << std::endl);
446 
447  return std::move(func);
448 }
TensorType
Memory type.
Definition: Types.h:38
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
Definition: Error.h:466
const DataLayout data_layout
Definition: Im2Col.cpp:151
#define ARM_COMPUTE_LOG_GRAPH_INFO(x)
Definition: Logger.h:54
bool is_data_type_quantized_asymmetric(DataType dt)
Check if a given data type is of asymmetric quantized type.
Definition: Utils.h:989
FloorUKernelPtr func
#define ARM_COMPUTE_LOG_GRAPH_VERBOSE(x)
Definition: Logger.h:50
DataLayout
[DataLayout enum definition]
Definition: Types.h:114
size_t get_dimension_idx(DataLayout data_layout, const DataLayoutDimension data_layout_dimension)
Get index of a tensor's given dimension depending on its layout.
Definition: Utils.cpp:137

References ARM_COMPUTE_ERROR_ON, ARM_COMPUTE_LOG_GRAPH_INFO, ARM_COMPUTE_LOG_GRAPH_VERBOSE, ConcatenateLayerNode::concatenation_axis(), arm_compute::test::validation::data_layout, Tensor::desc(), func, arm_compute::graph::get_dimension_idx(), INode::id(), INode::input(), arm_compute::is_data_type_quantized_asymmetric(), ConcatenateLayerNode::is_enabled(), TensorDescriptor::layout, INode::name(), INode::num_inputs(), INode::num_outputs(), INode::output(), ConcatenateLayerNode::type(), and arm_compute::UNKNOWN.

◆ create_convolution_layer()

std::unique_ptr<IFunction> arm_compute::graph::backends::detail::create_convolution_layer ( ConvolutionLayerNode &node,
GraphContext &ctx
)

Create a backend convolution layer function.

Template Parameters
ConvolutionLayerFunctions: Backend convolution functions
TargetInfo: Target-specific information
Parameters
[in] node: Node to create the backend function for
[in] ctx: Graph context
Returns
Backend convolution layer function

Definition at line 461 of file FunctionHelpers.h.

462 {
463  validate_node<TargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);
464 
465  // Extract IO and info
466  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
467  typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
468  typename TargetInfo::TensorType *biases = get_backing_tensor<TargetInfo>(node.input(2));
469  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
470 
471  const bool is_quantized = is_data_type_quantized_asymmetric(input->info()->data_type());
472 
473  if(is_quantized)
474  {
475  biases->info()->set_data_type(DataType::S32);
476  }
477 
478  const PadStrideInfo conv_info = node.convolution_info();
479  const unsigned int num_groups = node.num_groups();
480  const ConvolutionMethod conv_algorithm = node.convolution_method();
481  const bool fast_math = node.fast_math_hint() == FastMathHint::Enabled;
482  const ActivationLayerInfo fused_act = node.fused_activation();
483 
484  // Create and configure function (we assume that functions have been validated before creation)
485  std::shared_ptr<IMemoryManager> mm = get_memory_manager(ctx, TargetInfo::TargetType);
486  std::unique_ptr<IFunction> func;
487  std::string func_name;
488 
489  if(conv_algorithm == ConvolutionMethod::Winograd)
490  {
491  ARM_COMPUTE_ERROR_ON_MSG(num_groups != 1, "WinogradConvolutionLayer does not support grouping!");
492  std::tie(func, func_name) = create_named_memory_managed_function<typename ConvolutionLayerFunctions::WinogradConvolutionLayer>(
493  std::string("WinogradConvolutionLayer"), mm,
494  input, weights, biases, output, conv_info, fused_act, fast_math);
495  }
496  else if(conv_algorithm == ConvolutionMethod::Direct)
497  {
498  ARM_COMPUTE_ERROR_ON_MSG(num_groups != 1, "DirectConvolutionLayer does not support grouping!");
499  std::tie(func, func_name) = create_named_function<typename ConvolutionLayerFunctions::DirectConvolutionLayer>(
500  std::string("DirectConvolutionLayer"),
501  input, weights, biases, output, conv_info, fused_act);
502  }
503  else if(conv_algorithm == ConvolutionMethod::GEMM)
504  {
505  std::tie(func, func_name) = create_named_memory_managed_function<typename ConvolutionLayerFunctions::GEMMConvolutionLayer>(
506  std::string("GEMMConvolutionLayer"), mm,
507  input, weights, biases, output, conv_info,
508  WeightsInfo(), Size2D(1U, 1U), fused_act, num_groups);
509  }
510  else
511  {
512  std::tie(func, func_name) = create_named_memory_managed_function<typename ConvolutionLayerFunctions::GenericConvolutionLayer>(
513  std::string("GenericConvolutionLayer"), mm,
514  input, weights, biases, output, conv_info,
515  WeightsInfo(), Size2D(1U, 1U), fused_act, fast_math, num_groups);
516  }
517 
518  // Log info
519  std::ostringstream qss;
520  if(is_quantized)
521  {
522  qss << " Input QuantInfo: " << input->info()->quantization_info()
523  << " Weights QuantInfo: " << weights->info()->quantization_info()
524  << " Output QuantInfo: " << output->info()->quantization_info();
525  }
526  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
527  << node.name()
528  << " Type: " << func_name
529  << " Target: " << TargetInfo::TargetType
530  << " Data Type: " << input->info()->data_type()
531  << " Groups: " << num_groups
532  << " Input shape: " << input->info()->tensor_shape()
533  << " Weights shape: " << weights->info()->tensor_shape()
534  << " Output shape: " << output->info()->tensor_shape()
535  << qss.str()
536  << (fused_act.enabled() ? " " + to_string(fused_act.activation()) : "")
537  << std::endl);
538  return std::move(func);
539 }
TensorType
Memory type.
Definition: Types.h:38
std::string to_string(const GEMMConfigNative &config)
Definition: Utils.cpp:156
ConvolutionMethod
Available ConvolutionMethod.
Definition: Types.h:132
#define ARM_COMPUTE_LOG_GRAPH_INFO(x)
Definition: Logger.h:54
#define ARM_COMPUTE_ERROR_ON_MSG(cond, msg)
Definition: Error.h:456
const unsigned int num_groups
Definition: Im2Col.cpp:153
bool is_data_type_quantized_asymmetric(DataType dt)
Check if a given data type is of asymmetric quantized type.
Definition: Utils.h:989
FloorUKernelPtr func
std::shared_ptr< IMemoryManager > get_memory_manager(GraphContext &ctx, Target target)
Returns the memory manager for a given target.
Definition: Utils.h:89

References ActivationLayerInfo::activation(), ARM_COMPUTE_ERROR_ON_MSG, ARM_COMPUTE_LOG_GRAPH_INFO, arm_compute::test::validation::conv_info, ConvolutionLayerNode::convolution_info(), ConvolutionLayerNode::convolution_method(), arm_compute::graph::Direct, arm_compute::graph::Enabled, ActivationLayerInfo::enabled(), ConvolutionLayerNode::fast_math_hint(), func, ConvolutionLayerNode::fused_activation(), arm_compute::graph::GEMM, arm_compute::graph::backends::get_memory_manager(), INode::input(), arm_compute::test::validation::input, arm_compute::is_data_type_quantized_asymmetric(), INode::name(), ConvolutionLayerNode::num_groups(), arm_compute::test::validation::num_groups, INode::output(), arm_compute::S32, arm_compute::to_string(), arm_compute::utils::cast::U, and arm_compute::graph::Winograd.

◆ create_deconvolution_layer()

std::unique_ptr<IFunction> arm_compute::graph::backends::detail::create_deconvolution_layer ( DeconvolutionLayerNode node,
GraphContext ctx 
)

Create a backend deconvolution layer function.

Template Parameters
DeconvolutionLayerFunction  Backend deconvolution function
TargetInfo                  Target-specific information
Parameters
[in] node  Node to create the backend function for
[in] ctx   Graph context
Returns
Backend deconvolution layer function

Definition at line 552 of file FunctionHelpers.h.

553 {
554  validate_node<TargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);
555 
556  // Extract IO and info
557  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
558  typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
559  typename TargetInfo::TensorType *biases = get_backing_tensor<TargetInfo>(node.input(2));
560  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
561 
562  const PadStrideInfo deconv_info = node.deconvolution_info();
563 
564  // Create and configure function (we assume that functions have been validated before creation)
565  std::shared_ptr<IMemoryManager> mm = get_memory_manager(ctx, TargetInfo::TargetType);
566  std::unique_ptr<IFunction> func;
567 
568  std::tie(func, std::ignore) = create_named_memory_managed_function<DeconvolutionLayerFunction>(
569  std::string(), mm,
570  input, weights, biases, output, deconv_info);
571 
572  // Log info
573  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
574  << node.name()
575  << " Type: " << node.type()
576  << " Target: " << TargetInfo::TargetType
577  << " Data Type: " << input->info()->data_type()
578  << " Input shape: " << input->info()->tensor_shape()
579  << " Weights shape: " << weights->info()->tensor_shape()
580  << " Output shape: " << output->info()->tensor_shape()
581  << std::endl);
582  return func;
583 }
TensorType
Memory type.
Definition: Types.h:38
#define ARM_COMPUTE_LOG_GRAPH_INFO(x)
Definition: Logger.h:54
FloorUKernelPtr func
std::shared_ptr< IMemoryManager > get_memory_manager(GraphContext &ctx, Target target)
Returns the memory manager for a given target.
Definition: Utils.h:89

References ARM_COMPUTE_LOG_GRAPH_INFO, DeconvolutionLayerNode::deconvolution_info(), func, arm_compute::graph::backends::get_memory_manager(), INode::input(), arm_compute::test::validation::input, INode::name(), INode::output(), and DeconvolutionLayerNode::type().

◆ create_depth_to_space_layer()

std::unique_ptr<IFunction> arm_compute::graph::backends::detail::create_depth_to_space_layer ( DepthToSpaceLayerNode node)

Create a backend depth to space layer function.

Template Parameters
DepthToSpaceLayerNodeFunction  Backend depth to space function
TargetInfo                     Target-specific information
Parameters
[in] node  Node to create the backend function for
Returns
Backend depth to space layer function

Definition at line 657 of file FunctionHelpers.h.

658 {
659  validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
660 
661  // Extract IO and info
662  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
663  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
664 
665  ARM_COMPUTE_ERROR_ON(input == nullptr);
666  ARM_COMPUTE_ERROR_ON(output == nullptr);
667 
668  // Create and configure function
669  auto func = std::make_unique<DepthToSpaceLayerFunction>();
670  func->configure(input, output, node.block_shape());
671 
672  // Log info
673  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
674  << node.name()
675  << " Type: " << node.type()
676  << " Target: " << TargetInfo::TargetType
677  << " Data Type: " << input->info()->data_type()
678  << " Input shape: " << input->info()->tensor_shape()
679  << " Block Size: " << node.block_shape()
680  << " Output shape: " << output->info()->tensor_shape()
681  << std::endl);
682 
683  return std::move(func);
684 }
TensorType
Memory type.
Definition: Types.h:38
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
Definition: Error.h:466
#define ARM_COMPUTE_LOG_GRAPH_INFO(x)
Definition: Logger.h:54
FloorUKernelPtr func

References ARM_COMPUTE_ERROR_ON, ARM_COMPUTE_LOG_GRAPH_INFO, DepthToSpaceLayerNode::block_shape(), func, INode::input(), arm_compute::test::validation::input, INode::name(), INode::output(), and DepthToSpaceLayerNode::type().

◆ create_depthwise_convolution_layer()

std::unique_ptr<IFunction> arm_compute::graph::backends::detail::create_depthwise_convolution_layer ( DepthwiseConvolutionLayerNode node)

Create a backend layer depth-wise convolution function.

Template Parameters
DepthwiseConvolutionLayerFunctions  Backend depthwise convolution function
TargetInfo                          Target-specific information
Parameters
[in] node  Node to create the backend function for
Returns
Backend depth-wise convolution layer function

Definition at line 595 of file FunctionHelpers.h.

596 {
597  validate_node<TargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);
598 
599  // Extract IO and info
600  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
601  typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
602  typename TargetInfo::TensorType *biases = get_backing_tensor<TargetInfo>(node.input(2));
603  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
604 
605  const bool is_quantized = is_data_type_quantized_asymmetric(input->info()->data_type());
606 
607  if(is_quantized)
608  {
609  biases->info()->set_data_type(DataType::S32);
610  }
611 
612  const PadStrideInfo conv_info = node.convolution_info();
613  const unsigned int depth_multiplier = node.depth_multiplier();
614  const ActivationLayerInfo fused_act = node.fused_activation();
615 
616  // Create and configure function (we assume that functions have been validated before creation)
617  std::unique_ptr<IFunction> func;
618  std::string func_name;
619 
620  std::tie(func, func_name) = create_named_function<DepthwiseConvolutionLayer>(
621  std::string("DepthwiseConvolutionLayer"),
622  input, weights, biases, output, conv_info, depth_multiplier, fused_act);
623 
624  // Log info
625  std::ostringstream qss;
626  if(is_quantized)
627  {
628  qss << " Input QuantInfo: " << input->info()->quantization_info()
629  << " Weights QuantInfo: " << weights->info()->quantization_info()
630  << " Output QuantInfo: " << output->info()->quantization_info();
631  }
632  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
633  << node.name()
634  << " Type: " << func_name
635  << " Target: " << TargetInfo::TargetType
636  << " Data Type: " << input->info()->data_type()
637  << " Input shape: " << input->info()->tensor_shape()
638  << " Weights shape: " << weights->info()->tensor_shape()
639  << " Output shape: " << output->info()->tensor_shape()
640  << " Depth multiplier: " << depth_multiplier
641  << qss.str()
642  << (fused_act.enabled() ? " " + to_string(fused_act.activation()) : "")
643  << std::endl);
644  return std::move(func);
645 }
TensorType
Memory type.
Definition: Types.h:38
std::string to_string(const GEMMConfigNative &config)
Definition: Utils.cpp:156
#define ARM_COMPUTE_LOG_GRAPH_INFO(x)
Definition: Logger.h:54
bool is_data_type_quantized_asymmetric(DataType dt)
Check if a given data type is of asymmetric quantized type.
Definition: Utils.h:989
FloorUKernelPtr func

References ActivationLayerInfo::activation(), ARM_COMPUTE_LOG_GRAPH_INFO, arm_compute::test::validation::conv_info, DepthwiseConvolutionLayerNode::convolution_info(), DepthwiseConvolutionLayerNode::depth_multiplier(), ActivationLayerInfo::enabled(), func, DepthwiseConvolutionLayerNode::fused_activation(), INode::input(), arm_compute::test::validation::input, arm_compute::is_data_type_quantized_asymmetric(), INode::name(), INode::output(), arm_compute::S32, and arm_compute::to_string().

◆ create_dequantization_layer()

std::unique_ptr<IFunction> arm_compute::graph::backends::detail::create_dequantization_layer ( DequantizationLayerNode node)

Create a backend dequantize layer function.

Template Parameters
DequantizationLayerFunction  Backend dequantize function
TargetInfo                   Target-specific information
Parameters
[in] node  Node to create the backend function for
Returns
Backend dequantize layer function

Definition at line 696 of file FunctionHelpers.h.

697 {
698  validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
699 
700  // Extract IO and info
701  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
702  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
703 
704  ARM_COMPUTE_ERROR_ON(input == nullptr);
705  ARM_COMPUTE_ERROR_ON(output == nullptr);
706 
707  // Create and configure function
708  auto func = std::make_unique<DequantizationLayerFunction>();
709  func->configure(input, output);
710 
711  // Log info
712  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
713  << node.name()
714  << " Type: " << node.type()
715  << " Target: " << TargetInfo::TargetType
716  << " Data Type: " << input->info()->data_type()
717  << " Input shape: " << input->info()->tensor_shape()
718  << " Input quantization info: " << output->info()->quantization_info()
719  << " Output shape: " << output->info()->tensor_shape()
720  << std::endl);
721 
722  return std::move(func);
723 }
TensorType
Memory type.
Definition: Types.h:38
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
Definition: Error.h:466
#define ARM_COMPUTE_LOG_GRAPH_INFO(x)
Definition: Logger.h:54
FloorUKernelPtr func

References ARM_COMPUTE_ERROR_ON, ARM_COMPUTE_LOG_GRAPH_INFO, func, INode::input(), arm_compute::test::validation::input, INode::name(), INode::output(), and DequantizationLayerNode::type().

◆ create_detection_output_layer()

std::unique_ptr<IFunction> arm_compute::graph::backends::detail::create_detection_output_layer ( DetectionOutputLayerNode node)

Create a backend detection output layer function.

Template Parameters
DetectionOutputLayerFunction  Backend detection output function
TargetInfo                    Target-specific information
Parameters
[in] node  Node to create the backend function for
Returns
Backend detection output layer function

Definition at line 734 of file FunctionHelpers.h.

735 {
736  validate_node<TargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);
737 
738  // Extract IO and info
739  typename TargetInfo::TensorType *input0 = get_backing_tensor<TargetInfo>(node.input(0));
740  typename TargetInfo::TensorType *input1 = get_backing_tensor<TargetInfo>(node.input(1));
741  typename TargetInfo::TensorType *input2 = get_backing_tensor<TargetInfo>(node.input(2));
742  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
743  const DetectionOutputLayerInfo detect_info = node.detection_output_info();
744 
745  ARM_COMPUTE_ERROR_ON(input0 == nullptr);
746  ARM_COMPUTE_ERROR_ON(input1 == nullptr);
747  ARM_COMPUTE_ERROR_ON(input2 == nullptr);
748  ARM_COMPUTE_ERROR_ON(output == nullptr);
749 
750  // Create and configure function
751  auto func = std::make_unique<DetectionOutputLayerFunction>();
752  func->configure(input0, input1, input2, output, detect_info);
753 
754  // Log info
755  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
756  << node.name()
757  << " Type: " << node.type()
758  << " Target: " << TargetInfo::TargetType
759  << " Data Type: " << input0->info()->data_type()
760  << " Input0 shape: " << input0->info()->tensor_shape()
761  << " Input1 shape: " << input1->info()->tensor_shape()
762  << " Input2 shape: " << input2->info()->tensor_shape()
763  << " Output shape: " << output->info()->tensor_shape()
764  << " DetectionOutputLayer info: " << detect_info
765  << std::endl);
766 
767  return std::move(func);
768 }
TensorType
Memory type.
Definition: Types.h:38
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
Definition: Error.h:466
#define ARM_COMPUTE_LOG_GRAPH_INFO(x)
Definition: Logger.h:54
FloorUKernelPtr func

References ARM_COMPUTE_ERROR_ON, ARM_COMPUTE_LOG_GRAPH_INFO, DetectionOutputLayerNode::detection_output_info(), func, INode::input(), INode::name(), INode::output(), and DetectionOutputLayerNode::type().

◆ create_detection_output_layer< CPPDetectionOutputLayer, CLTargetInfo >()

Definition at line 129 of file CLFunctionsFactory.cpp.

130 {
131  validate_node<CLTargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);
132 
133  // Extract IO and info
134  CLTargetInfo::TensorType *input0 = get_backing_tensor<CLTargetInfo>(node.input(0));
135  CLTargetInfo::TensorType *input1 = get_backing_tensor<CLTargetInfo>(node.input(1));
136  CLTargetInfo::TensorType *input2 = get_backing_tensor<CLTargetInfo>(node.input(2));
137  CLTargetInfo::TensorType *output = get_backing_tensor<CLTargetInfo>(node.output(0));
138  const DetectionOutputLayerInfo detect_info = node.detection_output_info();
139 
140  ARM_COMPUTE_ERROR_ON(input0 == nullptr);
141  ARM_COMPUTE_ERROR_ON(input1 == nullptr);
142  ARM_COMPUTE_ERROR_ON(input2 == nullptr);
143  ARM_COMPUTE_ERROR_ON(output == nullptr);
144 
145  // Create and configure function
146  auto func = std::make_unique<CPPDetectionOutputLayer>();
147  func->configure(input0, input1, input2, output, detect_info);
148 
149  // Log info
150  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
151  << node.name()
152  << " Type: " << node.type()
153  << " Target: " << CLTargetInfo::TargetType
154  << " Data Type: " << input0->info()->data_type()
155  << " Input0 shape: " << input0->info()->tensor_shape()
156  << " Input1 shape: " << input1->info()->tensor_shape()
157  << " Input2 shape: " << input2->info()->tensor_shape()
158  << " Output shape: " << output->info()->tensor_shape()
159  << " DetectionOutputLayer info: " << detect_info
160  << std::endl);
161 
162  auto wrap_function = std::make_unique<CPPWrapperFunction>();
163 
164  wrap_function->register_function(std::move(func));
165  wrap_function->register_tensor(input0);
166  wrap_function->register_tensor(input1);
167  wrap_function->register_tensor(input2);
168  wrap_function->register_tensor(output);
169 
170  return std::move(wrap_function);
171 }
TensorType
Memory type.
Definition: Types.h:38
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
Definition: Error.h:466
#define ARM_COMPUTE_LOG_GRAPH_INFO(x)
Definition: Logger.h:54
FloorUKernelPtr func

References ARM_COMPUTE_ERROR_ON, ARM_COMPUTE_LOG_GRAPH_INFO, ITensorInfo::data_type(), DetectionOutputLayerNode::detection_output_info(), func, ITensor::info(), INode::input(), INode::name(), INode::output(), ITensorInfo::tensor_shape(), and DetectionOutputLayerNode::type().

Referenced by CLFunctionFactory::create().

◆ create_detection_post_process_layer()

std::unique_ptr<IFunction> arm_compute::graph::backends::detail::create_detection_post_process_layer ( DetectionPostProcessLayerNode node)

Create a backend detection post process layer function.

Template Parameters
DetectionPostProcessLayerFunction  Backend detection post process function
TargetInfo                         Target-specific information
Parameters
[in] node  Node to create the backend function for
Returns
Backend detection post process layer function

Definition at line 780 of file FunctionHelpers.h.

781 {
782  validate_node<TargetInfo>(node, 3 /* expected inputs */, 4 /* expected outputs */);
783 
784  // Extract IO and info
785  typename TargetInfo::TensorType *input0 = get_backing_tensor<TargetInfo>(node.input(0));
786  typename TargetInfo::TensorType *input1 = get_backing_tensor<TargetInfo>(node.input(1));
787  typename TargetInfo::TensorType *input2 = get_backing_tensor<TargetInfo>(node.input(2));
788  typename TargetInfo::TensorType *output0 = get_backing_tensor<TargetInfo>(node.output(0));
789  typename TargetInfo::TensorType *output1 = get_backing_tensor<TargetInfo>(node.output(1));
790  typename TargetInfo::TensorType *output2 = get_backing_tensor<TargetInfo>(node.output(2));
791  typename TargetInfo::TensorType *output3 = get_backing_tensor<TargetInfo>(node.output(3));
792  const DetectionPostProcessLayerInfo detect_info = node.detection_post_process_info();
793 
794  ARM_COMPUTE_ERROR_ON(input0 == nullptr);
795  ARM_COMPUTE_ERROR_ON(input1 == nullptr);
796  ARM_COMPUTE_ERROR_ON(input2 == nullptr);
797  ARM_COMPUTE_ERROR_ON(output0 == nullptr);
798  ARM_COMPUTE_ERROR_ON(output1 == nullptr);
799  ARM_COMPUTE_ERROR_ON(output2 == nullptr);
800  ARM_COMPUTE_ERROR_ON(output3 == nullptr);
801 
802  // Create and configure function
803  auto func = std::make_unique<DetectionPostProcessLayerFunction>();
804  func->configure(input0, input1, input2, output0, output1, output2, output3, detect_info);
805 
806  // Log info
807  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
808  << node.name()
809  << " Type: " << node.type()
810  << " Target: " << TargetInfo::TargetType
811  << " Data Type: " << input0->info()->data_type()
812  << " Input0 shape: " << input0->info()->tensor_shape()
813  << " Input1 shape: " << input1->info()->tensor_shape()
814  << " Input2 shape: " << input2->info()->tensor_shape()
815  << " Output0 shape: " << output0->info()->tensor_shape()
816  << " Output1 shape: " << output1->info()->tensor_shape()
817  << " Output2 shape: " << output2->info()->tensor_shape()
818  << " Output3 shape: " << output3->info()->tensor_shape()
819  << " DetectionPostProcessLayer info: " << detect_info
820  << std::endl);
821 
822  return std::move(func);
823 }
TensorType
Memory type.
Definition: Types.h:38
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
Definition: Error.h:466
#define ARM_COMPUTE_LOG_GRAPH_INFO(x)
Definition: Logger.h:54
FloorUKernelPtr func

References ARM_COMPUTE_ERROR_ON, ARM_COMPUTE_LOG_GRAPH_INFO, DetectionPostProcessLayerNode::detection_post_process_info(), func, INode::input(), INode::name(), INode::output(), and DetectionPostProcessLayerNode::type().

◆ create_detection_post_process_layer< CPPDetectionPostProcessLayer, CLTargetInfo >()

Definition at line 173 of file CLFunctionsFactory.cpp.

174 {
175  validate_node<CLTargetInfo>(node, 3 /* expected inputs */, 4 /* expected outputs */);
176 
177  // Extract IO and info
178  CLTargetInfo::TensorType *input0 = get_backing_tensor<CLTargetInfo>(node.input(0));
179  CLTargetInfo::TensorType *input1 = get_backing_tensor<CLTargetInfo>(node.input(1));
180  CLTargetInfo::TensorType *input2 = get_backing_tensor<CLTargetInfo>(node.input(2));
181  CLTargetInfo::TensorType *output0 = get_backing_tensor<CLTargetInfo>(node.output(0));
182  CLTargetInfo::TensorType *output1 = get_backing_tensor<CLTargetInfo>(node.output(1));
183  CLTargetInfo::TensorType *output2 = get_backing_tensor<CLTargetInfo>(node.output(2));
184  CLTargetInfo::TensorType *output3 = get_backing_tensor<CLTargetInfo>(node.output(3));
185  const DetectionPostProcessLayerInfo detect_info = node.detection_post_process_info();
186 
187  ARM_COMPUTE_ERROR_ON(input0 == nullptr);
188  ARM_COMPUTE_ERROR_ON(input1 == nullptr);
189  ARM_COMPUTE_ERROR_ON(input2 == nullptr);
190  ARM_COMPUTE_ERROR_ON(output0 == nullptr);
191  ARM_COMPUTE_ERROR_ON(output1 == nullptr);
192  ARM_COMPUTE_ERROR_ON(output2 == nullptr);
193  ARM_COMPUTE_ERROR_ON(output3 == nullptr);
194 
195  // Create and configure function
196  auto func = std::make_unique<CPPDetectionPostProcessLayer>();
197  func->configure(input0, input1, input2, output0, output1, output2, output3, detect_info);
198 
199  // Log info
200  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
201  << node.name()
202  << " Type: " << node.type()
203  << " Target: " << CLTargetInfo::TargetType
204  << " Data Type: " << input0->info()->data_type()
205  << " Input0 shape: " << input0->info()->tensor_shape()
206  << " Input1 shape: " << input1->info()->tensor_shape()
207  << " Input2 shape: " << input2->info()->tensor_shape()
208  << " Output0 shape: " << output0->info()->tensor_shape()
209  << " Output1 shape: " << output1->info()->tensor_shape()
210  << " Output2 shape: " << output2->info()->tensor_shape()
211  << " Output3 shape: " << output3->info()->tensor_shape()
212  << " DetectionPostProcessLayer info: " << detect_info
213  << std::endl);
214 
215  auto wrap_function = std::make_unique<CPPWrapperFunction>();
216 
217  wrap_function->register_function(std::move(func));
218  wrap_function->register_tensor(input0);
219  wrap_function->register_tensor(input1);
220  wrap_function->register_tensor(input2);
221  wrap_function->register_tensor(output0);
222  wrap_function->register_tensor(output1);
223  wrap_function->register_tensor(output2);
224  wrap_function->register_tensor(output3);
225 
226  return std::move(wrap_function);
227 }
TensorType
Memory type.
Definition: Types.h:38
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
Definition: Error.h:466
#define ARM_COMPUTE_LOG_GRAPH_INFO(x)
Definition: Logger.h:54
FloorUKernelPtr func

References ARM_COMPUTE_ERROR_ON, ARM_COMPUTE_LOG_GRAPH_INFO, ITensorInfo::data_type(), DetectionPostProcessLayerNode::detection_post_process_info(), func, ITensor::info(), INode::input(), INode::name(), INode::output(), ITensorInfo::tensor_shape(), and DetectionPostProcessLayerNode::type().

Referenced by CLFunctionFactory::create().

◆ create_eltwise_layer()

std::unique_ptr<IFunction> arm_compute::graph::backends::detail::create_eltwise_layer ( EltwiseLayerNode node)

Create a backend element-wise operation layer function.

Template Parameters
EltwiseFunctions  Backend element-wise function
TargetInfo        Target-specific information
Parameters
[in] node  Node to create the backend function for
Returns
Backend element-wise operation layer function

Definition at line 835 of file FunctionHelpers.h.

836 {
837  validate_node<TargetInfo>(node, 2 /* expected inputs */, 1 /* expected outputs */);
838 
839  // Extract IO and info
840  typename TargetInfo::TensorType *input1 = get_backing_tensor<TargetInfo>(node.input(0));
841  typename TargetInfo::TensorType *input2 = get_backing_tensor<TargetInfo>(node.input(1));
842  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
843  const EltwiseOperation eltwise_op = node.eltwise_operation();
844  const ConvertPolicy convert_policy = node.convert_policy();
845  const ActivationLayerInfo act_info = node.fused_activation();
846  ARM_COMPUTE_ERROR_ON(input1 == nullptr);
847  ARM_COMPUTE_ERROR_ON(input2 == nullptr);
848  ARM_COMPUTE_ERROR_ON(output == nullptr);
849 
850  std::unique_ptr<IFunction> func = nullptr;
851  std::string func_name;
852  if(eltwise_op == EltwiseOperation::Add)
853  {
854  std::tie(func, func_name) = create_named_function<typename EltwiseFunctions::Addition>(
855  std::string("ArithmeticAddition"),
856  input1, input2, output, convert_policy, act_info);
857  }
858  else if(eltwise_op == EltwiseOperation::Sub)
859  {
860  std::tie(func, func_name) = create_named_function<typename EltwiseFunctions::Subtraction>(
861  std::string("ArithmeticSubtraction"),
862  input1, input2, output, convert_policy, act_info);
863  }
864  else if(eltwise_op == EltwiseOperation::Mul)
865  {
866  std::tie(func, func_name) = create_named_function<typename EltwiseFunctions::Multiplication>(
867  std::string("PixelWiseMultiplication"),
868  input1, input2, output, 1.f, convert_policy, node.rounding_policy(), act_info);
869  }
870  else if(eltwise_op == EltwiseOperation::Max)
871  {
872  std::tie(func, func_name) = create_named_function<typename EltwiseFunctions::Maximum>(
873  std::string("ElementwiseMaximum"),
874  input1, input2, output, act_info);
875  }
876  else if(eltwise_op == EltwiseOperation::Div)
877  {
878  std::tie(func, func_name) = create_named_function<typename EltwiseFunctions::Division>(
879  std::string("ArithmeticDivision"),
880  input1, input2, output, act_info);
881  }
882  else
883  {
884  ARM_COMPUTE_ERROR("Unsupported element-wise operation!");
885  }
886 
887  // Log info
888  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
889  << node.name()
890  << " Type: " << node.type()
891  << " Target: " << TargetInfo::TargetType
892  << " Operation: " << func_name
893  << " Data Type: " << input1->info()->data_type()
894  << " Shape: " << input1->info()->tensor_shape()
895  << std::endl);
896 
897  return std::move(func);
898 }
EltwiseOperation
Supported Element-wise operations.
Definition: Types.h:108
#define ARM_COMPUTE_ERROR(msg)
Print the given message then throw an std::runtime_error.
Definition: Error.h:352
TensorType
Memory type.
Definition: Types.h:38
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
Definition: Error.h:466
#define ARM_COMPUTE_LOG_GRAPH_INFO(x)
Definition: Logger.h:54
FloorUKernelPtr func
ConvertPolicy
Policy to handle overflow.
Definition: Types.h:385

References arm_compute::graph::Add, ARM_COMPUTE_ERROR, ARM_COMPUTE_ERROR_ON, ARM_COMPUTE_LOG_GRAPH_INFO, EltwiseLayerNode::convert_policy(), arm_compute::graph::Div, EltwiseLayerNode::eltwise_operation(), func, EltwiseLayerNode::fused_activation(), INode::input(), arm_compute::graph::Max, arm_compute::graph::Mul, INode::name(), INode::output(), EltwiseLayerNode::rounding_policy(), arm_compute::graph::Sub, and EltwiseLayerNode::type().

◆ create_flatten_layer()

std::unique_ptr<IFunction> arm_compute::graph::backends::detail::create_flatten_layer ( FlattenLayerNode node)

Create a backend flatten layer function.

Template Parameters
FlattenLayerFunction  Backend flatten function
TargetInfo            Target-specific information
Parameters
[in] node  Node to create the backend function for
Returns
Backend flatten layer function

Definition at line 958 of file FunctionHelpers.h.

959 {
960  validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
961 
962  // Extract IO and info
963  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
964  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
965 
966  ARM_COMPUTE_ERROR_ON(input == nullptr);
967  ARM_COMPUTE_ERROR_ON(output == nullptr);
968 
969  // Create and configure function
970  auto func = std::make_unique<FlattenLayerFunction>();
971  func->configure(input, output);
972 
973  // Log info
974  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
975  << node.name()
976  << " Type: " << node.type()
977  << " Target: " << TargetInfo::TargetType
978  << " Data Type: " << input->info()->data_type()
979  << " Input shape: " << input->info()->tensor_shape()
980  << " Output shape: " << output->info()->tensor_shape()
981  << std::endl);
982 
983  return std::move(func);
984 }
TensorType
Memory type.
Definition: Types.h:38
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
Definition: Error.h:466
#define ARM_COMPUTE_LOG_GRAPH_INFO(x)
Definition: Logger.h:54
FloorUKernelPtr func

References ARM_COMPUTE_ERROR_ON, ARM_COMPUTE_LOG_GRAPH_INFO, func, INode::input(), arm_compute::test::validation::input, INode::name(), INode::output(), and FlattenLayerNode::type().

◆ create_fully_connected_layer()

std::unique_ptr<IFunction> arm_compute::graph::backends::detail::create_fully_connected_layer ( FullyConnectedLayerNode node,
GraphContext ctx 
)

Create a backend fully connected layer function.

Template Parameters
FullyConnectedLayerFunction  Backend fully-connected function
TargetInfo                   Target-specific information
Parameters
[in] node  Node to create the backend function for
[in] ctx   Graph context
Returns
Backend fully connected layer function

Definition at line 997 of file FunctionHelpers.h.

998 {
999  validate_node<TargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);
1000 
1001  // Extract IO and info
1002  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1003  typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
1004  typename TargetInfo::TensorType *biases = get_backing_tensor<TargetInfo>(node.input(2));
1005  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1006  const FullyConnectedLayerInfo fc_info = node.info();
1007 
1008  ARM_COMPUTE_ERROR_ON(input == nullptr);
1009  ARM_COMPUTE_ERROR_ON(weights == nullptr);
1010  ARM_COMPUTE_ERROR_ON(output == nullptr);
1011 
1012  // Create and configure function
1013  auto wm = get_weights_manager(ctx, TargetInfo::TargetType);
1014  auto mm = get_memory_manager(ctx, TargetInfo::TargetType);
1015  auto func = std::make_unique<FullyConnectedLayerFunction>(mm, wm.get());
1016  func->configure(input, weights, biases, output, fc_info);
1017 
1018  const bool is_quantized = is_data_type_quantized_asymmetric(input->info()->data_type());
1019 
1020  // Log info
1021  std::ostringstream qss;
1022  if(is_quantized)
1023  {
1024  qss << " Input QuantInfo: " << input->info()->quantization_info()
1025  << " Weights QuantInfo: " << weights->info()->quantization_info()
1026  << " Output QuantInfo: " << output->info()->quantization_info();
1027  }
1028  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1029  << node.name()
1030  << " Type: " << node.type()
1031  << " Target: " << TargetInfo::TargetType
1032  << " Data Type: " << input->info()->data_type()
1033  << qss.str()
1034  << " Input shape: " << input->info()->tensor_shape()
1035  << " Weights shape: " << weights->info()->tensor_shape()
1036  << " Output shape: " << output->info()->tensor_shape()
1037  << std::endl);
1038 
1039  return std::move(func);
1040 }
TensorType
Memory type.
Definition: Types.h:38
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
Definition: Error.h:466
#define ARM_COMPUTE_LOG_GRAPH_INFO(x)
Definition: Logger.h:54
bool is_data_type_quantized_asymmetric(DataType dt)
Check if a given data type is of asymmetric quantized type.
Definition: Utils.h:989
FloorUKernelPtr func
std::shared_ptr< IMemoryManager > get_memory_manager(GraphContext &ctx, Target target)
Returns the memory manager for a given target.
Definition: Utils.h:89
std::shared_ptr< IWeightsManager > get_weights_manager(GraphContext &ctx, Target target)
Returns the weights manager for a given target.
Definition: Utils.h:102

References ARM_COMPUTE_ERROR_ON, ARM_COMPUTE_LOG_GRAPH_INFO, func, arm_compute::graph::backends::get_memory_manager(), arm_compute::graph::backends::get_weights_manager(), FullyConnectedLayerNode::info(), INode::input(), arm_compute::test::validation::input, arm_compute::is_data_type_quantized_asymmetric(), INode::name(), INode::output(), and FullyConnectedLayerNode::type().

◆ create_fused_convolution_batch_normalization_layer()

std::unique_ptr<IFunction> arm_compute::graph::backends::detail::create_fused_convolution_batch_normalization_layer ( FusedConvolutionBatchNormalizationNode node,
GraphContext ctx 
)

Create a backend fused convolution batch normalization layer function.

Template Parameters
FusedLayerTypesFused layer types
TargetInfoTarget-specific information
Parameters
[in]nodeNode to create the backend function for
[in]ctxGraph context
Returns
Backend fused convolution batch normalization layer function

Definition at line 219 of file FunctionHelpers.h.

220 {
221  validate_node<TargetInfo>(node, 7 /* expected inputs */, 1 /* expected outputs */);
222 
223  // Extract IO and info
224  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
225  typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
226  typename TargetInfo::TensorType *biases = get_backing_tensor<TargetInfo>(node.input(2));
227  typename TargetInfo::TensorType *mean = get_backing_tensor<TargetInfo>(node.input(3));
228  typename TargetInfo::TensorType *var = get_backing_tensor<TargetInfo>(node.input(4));
229  typename TargetInfo::TensorType *beta = get_backing_tensor<TargetInfo>(node.input(5));
230  typename TargetInfo::TensorType *gamma = get_backing_tensor<TargetInfo>(node.input(6));
231 
232  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
233 
234  const PadStrideInfo conv_info = node.convolution_info();
235  const unsigned int num_groups = node.num_groups();
236  const bool fast_math = node.fast_math_hint() == FastMathHint::Enabled;
237  const ActivationLayerInfo fused_act = node.fused_activation();
238  const float epsilon = node.epsilon();
239 
240  // Create and configure function (we assume that functions have been validated before creation)
241  std::shared_ptr<IMemoryManager> mm = get_memory_manager(ctx, TargetInfo::TargetType);
242  std::unique_ptr<IFunction> func;
243  std::string func_name;
244 
245  using FType = FusedConvolutionBatchNormalizationFunction<TargetInfo, FusedLayerTypes>;
246 
247  // Create and configure function
248  std::tie(func, func_name) = create_named_memory_managed_function<FType>(
249  std::string("FusedConvolutionBatchNormalizationLayer"), mm, input, weights, biases, output, mean, var, beta, gamma, epsilon, conv_info, num_groups, fast_math, fused_act);
250 
251  // Log info
252  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
253  << node.name()
254  << " Type: " << node.type()
255  << " Target: " << TargetInfo::TargetType
256  << " Data Type: " << input->info()->data_type()
257  << " Input shape: " << input->info()->tensor_shape()
258  << " Weights shape: " << weights->info()->tensor_shape()
259  << " Output shape: " << output->info()->tensor_shape()
260  << (fused_act.enabled() ? " " + to_string(fused_act.activation()) : "")
261  << std::endl);
262  return std::move(func);
263 }
TensorType
Memory type.
Definition: Types.h:38
std::string to_string(const GEMMConfigNative &config)
Definition: Utils.cpp:156
#define ARM_COMPUTE_LOG_GRAPH_INFO(x)
Definition: Logger.h:54
const unsigned int num_groups
Definition: Im2Col.cpp:153
FloorUKernelPtr func
std::shared_ptr< IMemoryManager > get_memory_manager(GraphContext &ctx, Target target)
Returns the memory manager for a given target.
Definition: Utils.h:89

References ActivationLayerInfo::activation(), ARM_COMPUTE_LOG_GRAPH_INFO, arm_compute::test::validation::conv_info, FusedConvolutionBatchNormalizationNode::convolution_info(), arm_compute::graph::Enabled, ActivationLayerInfo::enabled(), arm_compute::quantization::epsilon, FusedConvolutionBatchNormalizationNode::epsilon(), FusedConvolutionBatchNormalizationNode::fast_math_hint(), func, FusedConvolutionBatchNormalizationNode::fused_activation(), arm_compute::graph::backends::get_memory_manager(), INode::input(), arm_compute::test::validation::input, INode::name(), FusedConvolutionBatchNormalizationNode::num_groups(), arm_compute::test::validation::num_groups, INode::output(), arm_compute::to_string(), and FusedConvolutionBatchNormalizationNode::type().

◆ create_fused_depthwise_convolution_batch_normalization_layer()

std::unique_ptr<IFunction> arm_compute::graph::backends::detail::create_fused_depthwise_convolution_batch_normalization_layer ( FusedDepthwiseConvolutionBatchNormalizationNode node,
GraphContext ctx 
)

Create a backend fused depthwise convolution batch normalization layer function.

Template Parameters
FusedLayerTypesFused layer types
TargetInfoTarget-specific information
Parameters
[in]nodeNode to create the backend function for
[in]ctxGraph context
Returns
Backend fused depthwise convolution batch normalization layer function

Definition at line 276 of file FunctionHelpers.h.

277 {
278  validate_node<TargetInfo>(node, 7 /* expected inputs */, 1 /* expected outputs */);
279 
280  // Extract IO and info
281  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
282  typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
283  typename TargetInfo::TensorType *biases = get_backing_tensor<TargetInfo>(node.input(2));
284  typename TargetInfo::TensorType *mean = get_backing_tensor<TargetInfo>(node.input(3));
285  typename TargetInfo::TensorType *var = get_backing_tensor<TargetInfo>(node.input(4));
286  typename TargetInfo::TensorType *beta = get_backing_tensor<TargetInfo>(node.input(5));
287  typename TargetInfo::TensorType *gamma = get_backing_tensor<TargetInfo>(node.input(6));
288 
289  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
290 
291  const PadStrideInfo conv_info = node.convolution_info();
292  const unsigned int depth_multiplier = node.depth_multiplier();
293  const ActivationLayerInfo fused_act = node.fused_activation();
294  const float epsilon = node.epsilon();
295 
296  // Create and configure function (we assume that functions have been validated before creation)
297  std::shared_ptr<IMemoryManager> mm = get_memory_manager(ctx, TargetInfo::TargetType);
298  std::unique_ptr<IFunction> func;
299  std::string func_name;
300 
301  using FType = FusedDepthwiseConvolutionBatchNormalizationFunction<TargetInfo, FusedLayerTypes>;
302 
303  // Create and configure function
304  std::tie(func, func_name) = create_named_memory_managed_function<FType>(
305  std::string("FusedDepthwiseConvolutionBatchNormalizationLayer"), mm, input, weights, biases, output, mean, var, beta, gamma, epsilon, conv_info, depth_multiplier, fused_act);
306 
307  // Log info
308  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
309  << node.name()
310  << " Type: " << node.type()
311  << " Target: " << TargetInfo::TargetType
312  << " Data Type: " << input->info()->data_type()
313  << " Input shape: " << input->info()->tensor_shape()
314  << " Weights shape: " << weights->info()->tensor_shape()
315  << " Output shape: " << output->info()->tensor_shape()
316  << (fused_act.enabled() ? " " + to_string(fused_act.activation()) : "")
317  << std::endl);
318  return std::move(func);
319 }
TensorType
Memory type.
Definition: Types.h:38
std::string to_string(const GEMMConfigNative &config)
Definition: Utils.cpp:156
#define ARM_COMPUTE_LOG_GRAPH_INFO(x)
Definition: Logger.h:54
FloorUKernelPtr func
std::shared_ptr< IMemoryManager > get_memory_manager(GraphContext &ctx, Target target)
Returns the memory manager for a given target.
Definition: Utils.h:89

References ActivationLayerInfo::activation(), ARM_COMPUTE_LOG_GRAPH_INFO, arm_compute::test::validation::conv_info, FusedDepthwiseConvolutionBatchNormalizationNode::convolution_info(), FusedDepthwiseConvolutionBatchNormalizationNode::depth_multiplier(), ActivationLayerInfo::enabled(), arm_compute::quantization::epsilon, FusedDepthwiseConvolutionBatchNormalizationNode::epsilon(), func, FusedDepthwiseConvolutionBatchNormalizationNode::fused_activation(), arm_compute::graph::backends::get_memory_manager(), INode::input(), arm_compute::test::validation::input, INode::name(), INode::output(), arm_compute::to_string(), and FusedDepthwiseConvolutionBatchNormalizationNode::type().

◆ create_generate_proposals_layer()

std::unique_ptr<IFunction> arm_compute::graph::backends::detail::create_generate_proposals_layer ( GenerateProposalsLayerNode node,
GraphContext ctx 
)

Create a backend generate proposals layer function.

Template Parameters
GenerateProposalsLayerFunctionBackend generate proposals function
TargetInfoTarget-specific information
Parameters
[in]nodeNode to create the backend function for
[in]ctxGraph context
Returns
Backend generate proposals layer function

Definition at line 1053 of file FunctionHelpers.h.

1054 {
1055  validate_node<TargetInfo>(node, 3 /* expected inputs */, 3 /* expected outputs */);
1056 
1057  // Extract IO and info
1058  typename TargetInfo::TensorType *scores = get_backing_tensor<TargetInfo>(node.input(0));
1059  typename TargetInfo::TensorType *deltas = get_backing_tensor<TargetInfo>(node.input(1));
1060  typename TargetInfo::TensorType *anchors = get_backing_tensor<TargetInfo>(node.input(2));
1061  typename TargetInfo::TensorType *proposals = get_backing_tensor<TargetInfo>(node.output(0));
1062  typename TargetInfo::TensorType *scores_out = get_backing_tensor<TargetInfo>(node.output(1));
1063  typename TargetInfo::TensorType *num_valid_proposals = get_backing_tensor<TargetInfo>(node.output(2));
1064  const GenerateProposalsInfo info = node.info();
1065 
1066  ARM_COMPUTE_ERROR_ON(scores == nullptr);
1067  ARM_COMPUTE_ERROR_ON(deltas == nullptr);
1068  ARM_COMPUTE_ERROR_ON(anchors == nullptr);
1069  ARM_COMPUTE_ERROR_ON(proposals == nullptr);
1070  ARM_COMPUTE_ERROR_ON(scores_out == nullptr);
1071 
1072  // Create and configure function
1073  auto func = std::make_unique<GenerateProposalsLayerFunction>(get_memory_manager(ctx, TargetInfo::TargetType));
1074  func->configure(scores, deltas, anchors, proposals, scores_out, num_valid_proposals, info);
1075 
1076  // Log info
1077  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.type()
1078  << " Target " << TargetInfo::TargetType
1079  << " Data Type: " << scores->info()->data_type()
1080  << " Scores shape: " << scores->info()->tensor_shape()
1081  << " Deltas shape: " << deltas->info()->tensor_shape()
1082  << " Anchors shape: " << anchors->info()->tensor_shape()
1083  << " Proposals shape: " << proposals->info()->tensor_shape()
1084  << " Num valid proposals shape: " << num_valid_proposals->info()->tensor_shape()
1085  << " Scores Out shape: " << scores_out->info()->tensor_shape()
1086  << std::endl);
1087 
1088  return std::move(func);
1089 }
TensorType
Memory type.
Definition: Types.h:38
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
Definition: Error.h:466
#define ARM_COMPUTE_LOG_GRAPH_INFO(x)
Definition: Logger.h:54
FloorUKernelPtr func
ScaleKernelInfo info(interpolation_policy, default_border_mode, PixelValue(), sampling_policy, false)
std::shared_ptr< IMemoryManager > get_memory_manager(GraphContext &ctx, Target target)
Returns the memory manager for a given target.
Definition: Utils.h:89

References ARM_COMPUTE_ERROR_ON, ARM_COMPUTE_LOG_GRAPH_INFO, func, arm_compute::graph::backends::get_memory_manager(), GenerateProposalsLayerNode::info(), arm_compute::test::validation::info, INode::input(), INode::output(), and GenerateProposalsLayerNode::type().

◆ create_l2_normalize_layer()

std::unique_ptr<IFunction> arm_compute::graph::backends::detail::create_l2_normalize_layer ( L2NormalizeLayerNode node,
GraphContext ctx 
)

Create a backend l2 normalization layer function.

Template Parameters
NormalizationLayerFunctionBackend normalization function
TargetInfoTarget-specific information
Parameters
[in]nodeNode to create the backend function for
[in]ctxGraph context
Returns
Backend normalization layer function

Definition at line 1102 of file FunctionHelpers.h.

1103 {
1104  validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1105 
1106  // Extract IO and info
1107  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1108  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1109  int axis = node.axis();
1110  float epsilon = node.epsilon();
1111 
1112  ARM_COMPUTE_ERROR_ON(input == nullptr);
1113  ARM_COMPUTE_ERROR_ON(output == nullptr);
1114 
1115  // Create and configure function
1116  auto mm = get_memory_manager(ctx, TargetInfo::TargetType);
1117  auto func = std::make_unique<L2NormalizeLayerFunction>(mm);
1118  func->configure(input, output, axis, epsilon);
1119 
1120  // Log info
1121  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1122  << node.name()
1123  << " Type: " << node.type()
1124  << " Target: " << TargetInfo::TargetType
1125  << " Data Type: " << input->info()->data_type()
1126  << " Input shape: " << input->info()->tensor_shape()
1127  << " Output shape: " << output->info()->tensor_shape()
1128  << " Axis: " << axis
1129  << " Epsilon: " << epsilon
1130  << std::endl);
1131 
1132  return std::move(func);
1133 }
TensorType
Memory type.
Definition: Types.h:38
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
Definition: Error.h:466
#define ARM_COMPUTE_LOG_GRAPH_INFO(x)
Definition: Logger.h:54
FloorUKernelPtr func
std::shared_ptr< IMemoryManager > get_memory_manager(GraphContext &ctx, Target target)
Returns the memory manager for a given target.
Definition: Utils.h:89

References ARM_COMPUTE_ERROR_ON, ARM_COMPUTE_LOG_GRAPH_INFO, L2NormalizeLayerNode::axis(), arm_compute::quantization::epsilon, L2NormalizeLayerNode::epsilon(), func, arm_compute::graph::backends::get_memory_manager(), INode::input(), arm_compute::test::validation::input, INode::name(), INode::output(), and L2NormalizeLayerNode::type().

◆ create_normalization_layer()

std::unique_ptr<IFunction> arm_compute::graph::backends::detail::create_normalization_layer ( NormalizationLayerNode node,
GraphContext ctx 
)

Create a backend normalization layer function.

Template Parameters
NormalizationLayerFunctionBackend normalization function
TargetInfoTarget-specific information
Parameters
[in]nodeNode to create the backend function for
[in]ctxGraph context
Returns
Backend normalization layer function

Definition at line 1146 of file FunctionHelpers.h.

1147 {
1148  ARM_COMPUTE_UNUSED(ctx);
1149 
1150  validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1151 
1152  // Extract IO and info
1153  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1154  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1155  const NormalizationLayerInfo norm_info = node.normalization_info();
1156  ARM_COMPUTE_ERROR_ON(input == nullptr);
1157  ARM_COMPUTE_ERROR_ON(output == nullptr);
1158 
1159  // Create and configure function
1160  auto func = std::make_unique<NormalizationLayerFunction>();
1161  func->configure(input, output, norm_info);
1162 
1163  // Log info
1164  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1165  << node.name()
1166  << " Type: " << node.type()
1167  << " Target: " << TargetInfo::TargetType
1168  << " Data Type: " << input->info()->data_type()
1169  << " Input shape: " << input->info()->tensor_shape()
1170  << " Output shape: " << output->info()->tensor_shape()
1171  << " Normalization info: " << norm_info.type()
1172  << std::endl);
1173 
1174  return std::move(func);
1175 }
TensorType
Memory type.
Definition: Types.h:38
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
Definition: Error.h:466
#define ARM_COMPUTE_LOG_GRAPH_INFO(x)
Definition: Logger.h:54
#define ARM_COMPUTE_UNUSED(...)
To avoid unused variables warnings.
Definition: Error.h:152
FloorUKernelPtr func

References ARM_COMPUTE_ERROR_ON, ARM_COMPUTE_LOG_GRAPH_INFO, ARM_COMPUTE_UNUSED, func, INode::input(), arm_compute::test::validation::input, INode::name(), NormalizationLayerNode::normalization_info(), INode::output(), and NormalizationLayerNode::type().

◆ create_normalization_layer< NENormalizationLayer, NETargetInfo >()

Definition at line 93 of file NEFunctionFactory.cpp.

94 {
95  validate_node<NETargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
96 
97  // Extract IO and info
98  NETargetInfo::TensorType *input = get_backing_tensor<NETargetInfo>(node.input(0));
99  NETargetInfo::TensorType *output = get_backing_tensor<NETargetInfo>(node.output(0));
100  const NormalizationLayerInfo norm_info = node.normalization_info();
101  ARM_COMPUTE_ERROR_ON(input == nullptr);
102  ARM_COMPUTE_ERROR_ON(output == nullptr);
103 
104  // Create and configure function
105  auto func = std::make_unique<NENormalizationLayer>(get_memory_manager(ctx, NETargetInfo::TargetType));
106  func->configure(input, output, norm_info);
107 
108  // Log info
109  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
110  << node.name()
111  << " Type: " << node.type()
112  << " Target: " << NETargetInfo::TargetType
113  << " Data Type: " << input->info()->data_type()
114  << " Input shape: " << input->info()->tensor_shape()
115  << " Output shape: " << output->info()->tensor_shape()
116  << " Normalization info: " << norm_info.type()
117  << std::endl);
118 
119  return std::move(func);
120 }
TensorType
Memory type.
Definition: Types.h:38
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
Definition: Error.h:466
#define ARM_COMPUTE_LOG_GRAPH_INFO(x)
Definition: Logger.h:54
FloorUKernelPtr func
std::shared_ptr< IMemoryManager > get_memory_manager(GraphContext &ctx, Target target)
Returns the memory manager for a given target.
Definition: Utils.h:89

References ARM_COMPUTE_ERROR_ON, ARM_COMPUTE_LOG_GRAPH_INFO, func, arm_compute::graph::backends::get_memory_manager(), INode::input(), arm_compute::test::validation::input, INode::name(), NormalizationLayerNode::normalization_info(), INode::output(), and NormalizationLayerNode::type().

Referenced by NEFunctionFactory::create().

◆ create_normalize_planar_yuv_layer()

std::unique_ptr<IFunction> arm_compute::graph::backends::detail::create_normalize_planar_yuv_layer ( NormalizePlanarYUVLayerNode node)

Create a backend normalize planar YUV layer function.

Template Parameters
NormalizePlanarYUVLayerFunctionBackend normalize planar YUV function
TargetInfoTarget-specific information
Parameters
[in]nodeNode to create the backend function for
Returns
Backend normalize planar YUV layer function

Definition at line 1187 of file FunctionHelpers.h.

1188 {
1189  validate_node<TargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);
1190 
1191  // Extract IO and info
1192  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1193  typename TargetInfo::TensorType *mean = get_backing_tensor<TargetInfo>(node.input(1));
1194  typename TargetInfo::TensorType *std = get_backing_tensor<TargetInfo>(node.input(2));
1195  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1196  ARM_COMPUTE_ERROR_ON(input == nullptr);
1197  ARM_COMPUTE_ERROR_ON(mean == nullptr);
1198  ARM_COMPUTE_ERROR_ON(std == nullptr);
1199  ARM_COMPUTE_ERROR_ON(output == nullptr);
1200 
1201  // Create and configure function
1202  auto func = std::make_unique<NormalizePlanarYUVLayerFunction>();
1203  func->configure(input, output, mean, std);
1204 
1205  // Log info
1206  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1207  << node.name()
1208  << " Type: " << node.type()
1209  << " Target: " << TargetInfo::TargetType
1210  << " Data Type: " << input->info()->data_type()
1211  << " Shape: " << input->info()->tensor_shape()
1212  << std::endl);
1213 
1214  return std::move(func);
1215 }
TensorType
Memory type.
Definition: Types.h:38
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
Definition: Error.h:466
#define ARM_COMPUTE_LOG_GRAPH_INFO(x)
Definition: Logger.h:54
FloorUKernelPtr func

References ARM_COMPUTE_ERROR_ON, ARM_COMPUTE_LOG_GRAPH_INFO, func, INode::input(), arm_compute::test::validation::input, INode::name(), INode::output(), and NormalizePlanarYUVLayerNode::type().

◆ create_pad_layer()

std::unique_ptr<IFunction> arm_compute::graph::backends::detail::create_pad_layer ( PadLayerNode node)

Create a backend pad layer function.

Template Parameters
PadLayerFunctionBackend pad function
TargetInfoTarget-specific information
Parameters
[in]nodeNode to create the backend function for
Returns
Backend pad layer function

Definition at line 1227 of file FunctionHelpers.h.

1228 {
1229  validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1230 
1231  // Extract IO and info
1232  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1233  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1234  const PaddingList &padding = node.padding();
1235  const PixelValue pad_value = node.pad_value();
1236  ARM_COMPUTE_ERROR_ON(input == nullptr);
1237  ARM_COMPUTE_ERROR_ON(output == nullptr);
1238 
1239  // Create and configure function
1240  auto func = std::make_unique<PadLayerFunction>();
1241  func->configure(input, output, padding, pad_value);
1242 
1243  // Log info
1244  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1245  << node.name()
1246  << " Type: " << node.type()
1247  << " Target: " << TargetInfo::TargetType
1248  << " Data Type: " << input->info()->data_type()
1249  << " Input shape: " << input->info()->tensor_shape()
1250  << " Output shape: " << output->info()->tensor_shape()
1251  << std::endl);
1252 
1253  return std::move(func);
1254 }
std::vector< PaddingInfo > PaddingList
List of padding information.
Definition: Types.h:434
TensorType
Memory type.
Definition: Types.h:38
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
Definition: Error.h:466
#define ARM_COMPUTE_LOG_GRAPH_INFO(x)
Definition: Logger.h:54
FloorUKernelPtr func

References ARM_COMPUTE_ERROR_ON, ARM_COMPUTE_LOG_GRAPH_INFO, func, INode::input(), arm_compute::test::validation::input, INode::name(), INode::output(), PadLayerNode::pad_value(), PadLayerNode::padding(), and PadLayerNode::type().

◆ create_permute_layer()

std::unique_ptr<IFunction> arm_compute::graph::backends::detail::create_permute_layer ( PermuteLayerNode node)

Create a backend permute layer function.

Template Parameters
PermuteLayerFunctionBackend permute function
TargetInfoTarget-specific information
Parameters
[in]nodeNode to create the backend function for
Returns
Backend permute layer function

Definition at line 1266 of file FunctionHelpers.h.

1267 {
1268  validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1269 
1270  // Extract IO and info
1271  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1272  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1273  const PermutationVector &perm = node.permutation_vector();
1274  ARM_COMPUTE_ERROR_ON(input == nullptr);
1275  ARM_COMPUTE_ERROR_ON(output == nullptr);
1276 
1277  // Create and configure function
1278  auto func = std::make_unique<PermuteLayerFunction>();
1279  func->configure(input, output, perm);
1280 
1281  // Log info
1282  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1283  << node.name()
1284  << " Type: " << node.type()
1285  << " Target: " << TargetInfo::TargetType
1286  << " Data Type: " << input->info()->data_type()
1287  << " Input shape: " << input->info()->tensor_shape()
1288  << " Output shape: " << output->info()->tensor_shape()
1289  << " Permutation vector: " << perm
1290  << std::endl);
1291 
1292  return std::move(func);
1293 }
TensorType
Memory type.
Definition: Types.h:38
Strides PermutationVector
Permutation vector.
Definition: Types.h:49
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
Definition: Error.h:466
#define ARM_COMPUTE_LOG_GRAPH_INFO(x)
Definition: Logger.h:54
FloorUKernelPtr func

References ARM_COMPUTE_ERROR_ON, ARM_COMPUTE_LOG_GRAPH_INFO, func, INode::input(), arm_compute::test::validation::input, INode::name(), INode::output(), PermuteLayerNode::permutation_vector(), and PermuteLayerNode::type().

◆ create_pooling_layer()

std::unique_ptr<IFunction> arm_compute::graph::backends::detail::create_pooling_layer ( PoolingLayerNode node)

Create a backend pooling layer function.

Template Parameters
PoolingLayerFunctionBackend pooling function
TargetInfoTarget-specific information
Parameters
[in]nodeNode to create the backend function for
Returns
Backend pooling layer function

Definition at line 1305 of file FunctionHelpers.h.

1306 {
1307  validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1308 
1309  // Extract IO and info
1310  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1311  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1312  const PoolingLayerInfo pool_info = node.pooling_info();
1313  ARM_COMPUTE_ERROR_ON(input == nullptr);
1314  ARM_COMPUTE_ERROR_ON(output == nullptr);
1315 
1316  // Create and configure function
1317  auto func = std::make_unique<PoolingLayerFunction>();
1318  func->configure(input, output, pool_info);
1319 
1320  // Log info
1321  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1322  << node.name()
1323  << " Type: " << node.type()
1324  << " Target: " << TargetInfo::TargetType
1325  << " Data Type: " << input->info()->data_type()
1326  << " Input shape: " << input->info()->tensor_shape()
1327  << " Output shape: " << output->info()->tensor_shape()
1328  << " Pooling info: " << pool_info.pool_type
1329  << std::endl);
1330 
1331  return std::move(func);
1332 }
TensorType
Memory type.
Definition: Types.h:38
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
Definition: Error.h:466
#define ARM_COMPUTE_LOG_GRAPH_INFO(x)
Definition: Logger.h:54
FloorUKernelPtr func

References ARM_COMPUTE_ERROR_ON, ARM_COMPUTE_LOG_GRAPH_INFO, func, INode::input(), arm_compute::test::validation::input, INode::name(), INode::output(), PoolingLayerNode::pooling_info(), and PoolingLayerNode::type().

◆ create_prelu_layer()

std::unique_ptr<IFunction> arm_compute::graph::backends::detail::create_prelu_layer ( PReluLayerNode node)

Create a backend PRelu layer function.

Template Parameters
PReluFunctionBackend PRelu function
TargetInfoTarget-specific information
Parameters
[in]nodeNode to create the backend function for
Returns
Backend PRelu layer function

Definition at line 1344 of file FunctionHelpers.h.

1345 {
1346  validate_node<TargetInfo>(node, 2 /* expected inputs */, 1 /* expected outputs */);
1347 
1348  // Extract IO and info
1349  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1350  typename TargetInfo::TensorType *alpha = get_backing_tensor<TargetInfo>(node.input(1));
1351  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1352  ARM_COMPUTE_ERROR_ON(input == nullptr || alpha == nullptr);
1353  ARM_COMPUTE_ERROR_ON(output == nullptr);
1354 
1355  // Create and configure function
1356  auto func = std::make_unique<PReluFunction>();
1357  func->configure(input, alpha, output);
1358 
1359  // Log info
1360  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1361  << node.name()
1362  << " Type: " << node.type()
1363  << " Target: " << TargetInfo::TargetType
1364  << " Data Type: " << input->info()->data_type()
1365  << " Input shape: " << input->info()->tensor_shape()
1366  << " Output shape: " << output->info()->tensor_shape()
1367  << std::endl);
1368 
1369  return std::move(func);
1370 }
TensorType
Memory type.
Definition: Types.h:38
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
Definition: Error.h:466
#define ARM_COMPUTE_LOG_GRAPH_INFO(x)
Definition: Logger.h:54
FloorUKernelPtr func

References ARM_COMPUTE_ERROR_ON, ARM_COMPUTE_LOG_GRAPH_INFO, func, INode::input(), arm_compute::test::validation::input, INode::name(), INode::output(), and PReluLayerNode::type().

◆ create_print_layer()

std::unique_ptr<IFunction> arm_compute::graph::backends::detail::create_print_layer ( PrintLayerNode node)

Create a backend print layer function.

Template Parameters
TargetInfoTarget-specific information
Parameters
[in]nodeNode to create the backend function for
Returns
Backend print layer function

Definition at line 1381 of file FunctionHelpers.h.

1382 {
1383  validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1384 
1385  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1386  ARM_COMPUTE_ERROR_ON(input == nullptr);
1387  ARM_COMPUTE_UNUSED(input);
1388 
1389  // Log info
1390  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1391  << node.name()
1392  << " Type: " << node.type()
1393  << " Target: " << TargetInfo::TargetType
1394  << " Data Type: " << input->info()->data_type()
1395  << " Input shape: " << input->info()->tensor_shape()
1396  << std::endl);
1397 
1398  return nullptr;
1399 }
TensorType
Memory type.
Definition: Types.h:38
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
Definition: Error.h:466
#define ARM_COMPUTE_LOG_GRAPH_INFO(x)
Definition: Logger.h:54
#define ARM_COMPUTE_UNUSED(...)
To avoid unused variables warnings.
Definition: Error.h:152

References ARM_COMPUTE_ERROR_ON, ARM_COMPUTE_LOG_GRAPH_INFO, ARM_COMPUTE_UNUSED, INode::input(), arm_compute::test::validation::input, INode::name(), and PrintLayerNode::type().

◆ create_priorbox_layer()

std::unique_ptr<IFunction> arm_compute::graph::backends::detail::create_priorbox_layer ( PriorBoxLayerNode node)

Create a backend priorbox layer function.

Template Parameters
PriorBoxLayerFunctionBackend priorbox function
TargetInfoTarget-specific information
Parameters
[in]nodeNode to create the backend function for
Returns
Backend priorbox layer function

Definition at line 1411 of file FunctionHelpers.h.

1412 {
1413  validate_node<TargetInfo>(node, 2 /* expected inputs */, 1 /* expected outputs */);
1414 
1415  // Extract IO and info
1416  typename TargetInfo::TensorType *input0 = get_backing_tensor<TargetInfo>(node.input(0));
1417  typename TargetInfo::TensorType *input1 = get_backing_tensor<TargetInfo>(node.input(1));
1418  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1419  const PriorBoxLayerInfo prior_info = node.priorbox_info();
1420  ARM_COMPUTE_ERROR_ON(input0 == nullptr);
1421  ARM_COMPUTE_ERROR_ON(input1 == nullptr);
1422  ARM_COMPUTE_ERROR_ON(output == nullptr);
1423 
1424  // Create and configure function
1425  auto func = std::make_unique<PriorBoxLayerFunction>();
1426  func->configure(input0, input1, output, prior_info);
1427 
1428  // Log info
1429  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1430  << node.name()
1431  << " Type: " << node.type()
1432  << " Target: " << TargetInfo::TargetType
1433  << " Data Type: " << input0->info()->data_type()
1434  << " Input0 shape: " << input0->info()->tensor_shape()
1435  << " Input1 shape: " << input1->info()->tensor_shape()
1436  << " Output shape: " << output->info()->tensor_shape()
1437  << " PriorBoxLayer info: " << prior_info
1438  << std::endl);
1439 
1440  return std::move(func);
1441 }
TensorType
Memory type.
Definition: Types.h:38
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
Definition: Error.h:466
#define ARM_COMPUTE_LOG_GRAPH_INFO(x)
Definition: Logger.h:54
FloorUKernelPtr func

References ARM_COMPUTE_ERROR_ON, ARM_COMPUTE_LOG_GRAPH_INFO, func, INode::input(), INode::name(), INode::output(), PriorBoxLayerNode::priorbox_info(), and PriorBoxLayerNode::type().

◆ create_quantization_layer()

std::unique_ptr<IFunction> arm_compute::graph::backends::detail::create_quantization_layer ( QuantizationLayerNode node)

Create a backend quantization layer function.

Template Parameters
QuantizationLayerFunctionBackend quantization function
TargetInfoTarget-specific information
Parameters
[in]nodeNode to create the backend function for
Returns
Backend quantization layer function

Definition at line 1453 of file FunctionHelpers.h.

1454 {
1455  validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1456 
1457  // Extract IO and info
1458  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1459  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1460  ARM_COMPUTE_ERROR_ON(input == nullptr);
1461  ARM_COMPUTE_ERROR_ON(output == nullptr);
1462 
1463  // Create and configure function
1464  auto func = std::make_unique<QuantizationLayerFunction>();
1465  func->configure(input, output);
1466 
1467  // Log info
1468  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1469  << node.name()
1470  << " Type: " << node.type()
1471  << " Target: " << TargetInfo::TargetType
1472  << " Data Type: " << input->info()->data_type()
1473  << " Input shape: " << input->info()->tensor_shape()
1474  << " Output shape: " << output->info()->tensor_shape()
1475  << std::endl);
1476 
1477  return std::move(func);
1478 }
TensorType
Memory type.
Definition: Types.h:38
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
Definition: Error.h:466
#define ARM_COMPUTE_LOG_GRAPH_INFO(x)
Definition: Logger.h:54
FloorUKernelPtr func

References ARM_COMPUTE_ERROR_ON, ARM_COMPUTE_LOG_GRAPH_INFO, func, INode::input(), arm_compute::test::validation::input, INode::name(), INode::output(), and QuantizationLayerNode::type().

◆ create_reduction_operation_layer()

std::unique_ptr<IFunction> arm_compute::graph::backends::detail::create_reduction_operation_layer ( ReductionLayerNode node,
GraphContext ctx 
)

Create a backend reduction operation layer function.

Template Parameters
ReductionOperationFunctionBackend reduction operation function
TargetInfoTarget-specific information
Parameters
[in]nodeNode to create the backend function for
[in]ctxGraph context
Returns
Backend reduction operation layer function

Definition at line 1491 of file FunctionHelpers.h.

1492 {
1493  validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1494 
1495  // Extract IO and info
1496  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1497  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1498  ReductionOperation op = node.op();
1499  int axis = node.axis();
1500  bool keep_dims = node.keep_dims();
1501  ARM_COMPUTE_ERROR_ON(input == nullptr);
1502  ARM_COMPUTE_ERROR_ON(output == nullptr);
1503 
1504  // Create and configure function
1505  auto func = std::make_unique<ReductionOperationFunction>(get_memory_manager(ctx, TargetInfo::TargetType));
1506  func->configure(input, output, axis, op, keep_dims);
1507 
1508  // Log info
1509  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1510  << node.name()
1511  << " Type: " << node.type()
1512  << " Target: " << TargetInfo::TargetType
1513  << " Data Type: " << input->info()->data_type()
1514  << " Input shape: " << input->info()->tensor_shape()
1515  << " Output shape: " << output->info()->tensor_shape()
1516  << " Operation: " << op
1517  << " Axis: " << axis
1518  << " Keep dimensions:" << keep_dims
1519  << std::endl);
1520 
1521  return std::move(func);
1522 }
ReductionOperation
Available reduction operations.
Definition: Types.h:457
TensorType
Memory type.
Definition: Types.h:38
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
Definition: Error.h:466
#define ARM_COMPUTE_LOG_GRAPH_INFO(x)
Definition: Logger.h:54
FloorUKernelPtr func
std::shared_ptr< IMemoryManager > get_memory_manager(GraphContext &ctx, Target target)
Returns the memory manager for a given target.
Definition: Utils.h:89

References ARM_COMPUTE_ERROR_ON, ARM_COMPUTE_LOG_GRAPH_INFO, ReductionLayerNode::axis(), func, arm_compute::graph::backends::get_memory_manager(), INode::input(), arm_compute::test::validation::input, ReductionLayerNode::keep_dims(), INode::name(), ReductionLayerNode::op(), INode::output(), and ReductionLayerNode::type().

◆ create_reorg_layer()

std::unique_ptr<IFunction> arm_compute::graph::backends::detail::create_reorg_layer ( ReorgLayerNode node)

Create a backend reorg layer function.

Template Parameters
ReorgLayerFunctionBackend reorg function
TargetInfoTarget-specific information
Parameters
[in]nodeNode to create the backend function for
Returns
Backend reorg layer function

Definition at line 1534 of file FunctionHelpers.h.

1535 {
1536  validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1537 
1538  // Extract IO and info
1539  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1540  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1541  ARM_COMPUTE_ERROR_ON(input == nullptr);
1542  ARM_COMPUTE_ERROR_ON(output == nullptr);
1543 
1544  // Create and configure function
1545  auto func = std::make_unique<ReorgLayerFunction>();
1546  func->configure(input, output, node.stride());
1547 
1548  // Log info
1549  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1550  << node.name()
1551  << " Type: " << node.type()
1552  << " Target: " << TargetInfo::TargetType
1553  << " Data Type: " << input->info()->data_type()
1554  << " Input shape: " << input->info()->tensor_shape()
1555  << " Output shape: " << output->info()->tensor_shape()
1556  << std::endl);
1557 
1558  return std::move(func);
1559 }
TensorType
Memory type.
Definition: Types.h:38
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
Definition: Error.h:466
#define ARM_COMPUTE_LOG_GRAPH_INFO(x)
Definition: Logger.h:54
FloorUKernelPtr func

References ARM_COMPUTE_ERROR_ON, ARM_COMPUTE_LOG_GRAPH_INFO, func, INode::input(), arm_compute::test::validation::input, INode::name(), INode::output(), ReorgLayerNode::stride(), and ReorgLayerNode::type().

◆ create_reshape_layer()

std::unique_ptr<IFunction> arm_compute::graph::backends::detail::create_reshape_layer ( ReshapeLayerNode node)

Create a backend reshape layer function.

Template Parameters
ReshapeLayerFunctionBackend reshape function
TargetInfoTarget-specific information
Parameters
[in]nodeNode to create the backend function for
Returns
Backend reshape layer function

Definition at line 1571 of file FunctionHelpers.h.

1572 {
1573  validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1574 
1575  // Extract IO and info
1576  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1577  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1578  ARM_COMPUTE_ERROR_ON(input == nullptr);
1579  ARM_COMPUTE_ERROR_ON(output == nullptr);
1580 
1581  // Create and configure function
1582  auto func = std::make_unique<ReshapeLayerFunction>();
1583  func->configure(input, output);
1584 
1585  // Log info
1586  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1587  << node.name()
1588  << " Type: " << node.type()
1589  << " Target: " << TargetInfo::TargetType
1590  << " Data Type: " << input->info()->data_type()
1591  << " Input shape: " << input->info()->tensor_shape()
1592  << " Output shape: " << output->info()->tensor_shape()
1593  << std::endl);
1594 
1595  return std::move(func);
1596 }
TensorType
Memory type.
Definition: Types.h:38
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
Definition: Error.h:466
#define ARM_COMPUTE_LOG_GRAPH_INFO(x)
Definition: Logger.h:54
FloorUKernelPtr func

References ARM_COMPUTE_ERROR_ON, ARM_COMPUTE_LOG_GRAPH_INFO, func, INode::input(), arm_compute::test::validation::input, INode::name(), INode::output(), and ReshapeLayerNode::type().

◆ create_resize_layer()

std::unique_ptr<IFunction> arm_compute::graph::backends::detail::create_resize_layer ( ResizeLayerNode node)

Create a backend resize layer function.

Template Parameters
ResizeLayerFunctionBackend resize function
TargetInfoTarget-specific information
Parameters
[in]nodeNode to create the backend function for
Returns
Backend resize layer function

Definition at line 1608 of file FunctionHelpers.h.

1609 {
1610  validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1611 
1612  // Extract IO and info
1613  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1614  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1615  ARM_COMPUTE_ERROR_ON(input == nullptr);
1616  ARM_COMPUTE_ERROR_ON(output == nullptr);
1617  const InterpolationPolicy policy = node.policy();
1618 
1619  // Create and configure function
1620  auto func = std::make_unique<ResizeLayerFunction>();
1621  func->configure(input, output, ScaleKernelInfo{ policy, BorderMode::CONSTANT, PixelValue(), SamplingPolicy::CENTER, false, false });
1622 
1623  // Log info
1624  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1625  << node.name()
1626  << " Type: " << node.type()
1627  << " Target: " << TargetInfo::TargetType
1628  << " Data Type: " << input->info()->data_type()
1629  << " Input shape: " << input->info()->tensor_shape()
1630  << " Output shape: " << output->info()->tensor_shape()
1631  << " Interpolation: " << policy
1632  << std::endl);
1633 
1634  return std::move(func);
1635 }
InterpolationPolicy
Interpolation method.
Definition: Types.h:392
TensorType
Memory type.
Definition: Types.h:38
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
Definition: Error.h:466
#define ARM_COMPUTE_LOG_GRAPH_INFO(x)
Definition: Logger.h:54
FloorUKernelPtr func

References ARM_COMPUTE_ERROR_ON, ARM_COMPUTE_LOG_GRAPH_INFO, arm_compute::CENTER, arm_compute::CONSTANT, func, INode::input(), arm_compute::test::validation::input, INode::name(), INode::output(), ResizeLayerNode::policy(), and ResizeLayerNode::type().

◆ create_roi_align_layer()

std::unique_ptr<IFunction> arm_compute::graph::backends::detail::create_roi_align_layer ( ROIAlignLayerNode node)

Create a backend ROI align layer function.

Template Parameters
ROIAlignLayerFunctionROI Align function
TargetInfoTarget-specific information
Parameters
[in]nodeNode to create the backend function for
Returns
ROI Align layer function

Definition at line 1647 of file FunctionHelpers.h.

1648 {
1649  validate_node<TargetInfo>(node, 2 /* expected inputs */, 1 /* expected outputs */);
1650 
1651  // Extract IO and info
1652  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1653  typename TargetInfo::TensorType *rois = get_backing_tensor<TargetInfo>(node.input(1));
1654  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1655  ARM_COMPUTE_ERROR_ON(input == nullptr);
1656  ARM_COMPUTE_ERROR_ON(output == nullptr);
1657  ARM_COMPUTE_ERROR_ON(rois == nullptr);
1658 
1659  const ROIPoolingLayerInfo pool_info = node.pooling_info();
1660 
1661  // Create and configure function
1662  auto func = std::make_unique<ROIAlignLayerFunction>();
1663 
1664  func->configure(input, rois, output, pool_info);
1665 
1666  // Log info
1667  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1668  << node.name()
1669  << " Type: " << node.type()
1670  << " Target: " << TargetInfo::TargetType
1671  << " Data Type: " << input->info()->data_type()
1672  << " Input shape: " << input->info()->tensor_shape()
1673  << " Output shape: " << output->info()->tensor_shape()
1674  << " ROIs shape: " << rois->info()->tensor_shape()
1675  << " ROIPooling width: " << pool_info.pooled_width()
1676  << " ROIPooling height: " << pool_info.pooled_height()
1677  << std::endl);
1678 
1679  return std::move(func);
1680 }
TensorType
Memory type.
Definition: Types.h:38
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
Definition: Error.h:466
#define ARM_COMPUTE_LOG_GRAPH_INFO(x)
Definition: Logger.h:54
FloorUKernelPtr func

References ARM_COMPUTE_ERROR_ON, ARM_COMPUTE_LOG_GRAPH_INFO, func, INode::input(), arm_compute::test::validation::input, INode::name(), INode::output(), ROIPoolingLayerInfo::pooled_height(), ROIPoolingLayerInfo::pooled_width(), ROIAlignLayerNode::pooling_info(), and ROIAlignLayerNode::type().

◆ create_slice_layer()

std::unique_ptr<IFunction> arm_compute::graph::backends::detail::create_slice_layer ( SliceLayerNode node)

Create a backend slice layer function.

Template Parameters
SliceLayerFunctionBackend slice function
TargetInfoTarget-specific information
Parameters
[in]nodeNode to create the backend function for
Returns
Backend slice layer function

Definition at line 1692 of file FunctionHelpers.h.

1693 {
1694  validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1695 
1696  // Extract IO and info
1697  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1698  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1699  ARM_COMPUTE_ERROR_ON(input == nullptr);
1700  ARM_COMPUTE_ERROR_ON(output == nullptr);
1701 
1702  // Create and configure function
1703  auto func = std::make_unique<SliceLayerFunction>();
1704  func->configure(input, output, node.starts(), node.ends());
1705 
1706  // Log info
1707  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1708  << node.name()
1709  << " Type: " << node.type()
1710  << " Target: " << TargetInfo::TargetType
1711  << " Data Type: " << input->info()->data_type()
1712  << " Input shape: " << input->info()->tensor_shape()
1713  << " Output shape: " << output->info()->tensor_shape()
1714  << std::endl);
1715 
1716  return std::move(func);
1717 }
TensorType
Memory type.
Definition: Types.h:38
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
Definition: Error.h:466
#define ARM_COMPUTE_LOG_GRAPH_INFO(x)
Definition: Logger.h:54
FloorUKernelPtr func

References ARM_COMPUTE_ERROR_ON, ARM_COMPUTE_LOG_GRAPH_INFO, SliceLayerNode::ends(), func, INode::input(), arm_compute::test::validation::input, INode::name(), INode::output(), SliceLayerNode::starts(), and SliceLayerNode::type().

◆ create_softmax_layer()

std::unique_ptr<IFunction> arm_compute::graph::backends::detail::create_softmax_layer ( SoftmaxLayerNode node,
GraphContext ctx 
)

Create a backend softmax layer function.

Template Parameters
SoftmaxLayerFunctionBackend softmax function
TargetInfoTarget-specific information
Parameters
[in]nodeNode to create the backend function for
[in]ctxGraph context
Returns
Backend softmax layer function

Definition at line 1730 of file FunctionHelpers.h.

1731 {
1732  validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1733 
1734  // Extract IO and info
1735  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1736  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1737  const float beta = node.beta();
1738  ARM_COMPUTE_ERROR_ON(input == nullptr);
1739  ARM_COMPUTE_ERROR_ON(output == nullptr);
1740 
1741  // Create and configure function
1742  auto func = std::make_unique<SoftmaxLayerFunction>(get_memory_manager(ctx, TargetInfo::TargetType));
1743  func->configure(input, output, beta);
1744 
1745  // Log info
1746  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1747  << node.name()
1748  << " Type: " << node.type()
1749  << " Target: " << TargetInfo::TargetType
1750  << " Data Type: " << input->info()->data_type()
1751  << " Input shape: " << input->info()->tensor_shape()
1752  << " Output shape: " << output->info()->tensor_shape()
1753  << std::endl);
1754 
1755  return std::move(func);
1756 }
TensorType
Memory type.
Definition: Types.h:38
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
Definition: Error.h:466
#define ARM_COMPUTE_LOG_GRAPH_INFO(x)
Definition: Logger.h:54
FloorUKernelPtr func
std::shared_ptr< IMemoryManager > get_memory_manager(GraphContext &ctx, Target target)
Returns the memory manager for a given target.
Definition: Utils.h:89

References ARM_COMPUTE_ERROR_ON, ARM_COMPUTE_LOG_GRAPH_INFO, SoftmaxLayerNode::beta(), func, arm_compute::graph::backends::get_memory_manager(), INode::input(), arm_compute::test::validation::input, INode::name(), INode::output(), and SoftmaxLayerNode::type().

◆ create_stack_layer()

std::unique_ptr<arm_compute::IFunction> arm_compute::graph::backends::detail::create_stack_layer ( StackLayerNode node)

Create a backend stack layer function.

Template Parameters
StackLayerFunctionBackend stack function
TargetInfoTarget-specific information
Parameters
[in]nodeNode to create the backend function for
Returns
Backend stack layer function

Definition at line 1768 of file FunctionHelpers.h.

1769 {
1770  ARM_COMPUTE_LOG_GRAPH_VERBOSE("Creating Stack node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
1771  ARM_COMPUTE_ERROR_ON(node.num_outputs() != 1);
1772 
1773  // Extract IO and info
1774  std::vector<typename TargetInfo::TensorType *> inputs;
1775  for(unsigned int i = 0; i < node.num_inputs(); ++i)
1776  {
1777  inputs.push_back(get_backing_tensor<TargetInfo>(node.input(i)));
1778  }
1779  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1780  const int axis = node.axis();
1781 
1782  // Create and configure function
1783  auto func = std::make_unique<StackLayerFunction>();
1784  func->configure(inputs, axis, output);
1785 
1786  // Log info
1787  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1788  << node.name()
1789  << " Type: " << node.type()
1790  << " Target: " << TargetInfo::TargetType
1791  << " Data Type: " << output->info()->data_type()
1792  << " Inputs shape: " << inputs[0]->info()->tensor_shape()
1793  << " Output shape: " << output->info()->tensor_shape()
1794  << " Num Inputs: " << inputs.size()
1795  << " Axis: " << axis
1796  << std::endl);
1797 
1798  return std::move(func);
1799 }
TensorType
Memory type.
Definition: Types.h:38
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
Definition: Error.h:466
#define ARM_COMPUTE_LOG_GRAPH_INFO(x)
Definition: Logger.h:54
FloorUKernelPtr func
#define ARM_COMPUTE_LOG_GRAPH_VERBOSE(x)
Definition: Logger.h:50

References ARM_COMPUTE_ERROR_ON, ARM_COMPUTE_LOG_GRAPH_INFO, ARM_COMPUTE_LOG_GRAPH_VERBOSE, StackLayerNode::axis(), func, INode::id(), INode::input(), INode::name(), INode::num_inputs(), INode::num_outputs(), INode::output(), and StackLayerNode::type().

◆ create_strided_slice_layer()

std::unique_ptr<IFunction> arm_compute::graph::backends::detail::create_strided_slice_layer ( StridedSliceLayerNode node)

Create a backend strided slice layer function.

Template Parameters
StridedSliceLayerFunctionBackend strided slice function
TargetInfoTarget-specific information
Parameters
[in]nodeNode to create the backend function for
Returns
Backend strided slice layer function

Definition at line 1811 of file FunctionHelpers.h.

1812 {
1813  validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1814 
1815  // Extract IO and info
1816  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1817  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1818  Coordinates starts = node.starts();
1819  Coordinates ends = node.ends();
1820  BiStrides strides = node.strides();
1821  StridedSliceLayerInfo info = node.strided_slice_info();
1822 
1823  ARM_COMPUTE_ERROR_ON(input == nullptr);
1824  ARM_COMPUTE_ERROR_ON(output == nullptr);
1825 
1826  // Create and configure function
1827  auto func = std::make_unique<StridedSliceLayerFunction>();
1828  func->configure(input, output, starts, ends, strides, info.begin_mask(), info.end_mask(), info.shrink_axis_mask());
1829 
1830  // Log info
1831  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1832  << node.name()
1833  << " Type: " << node.type()
1834  << " Target: " << TargetInfo::TargetType
1835  << " Data Type: " << input->info()->data_type()
1836  << " Input shape: " << input->info()->tensor_shape()
1837  << " Output shape: " << output->info()->tensor_shape()
1838  << std::endl);
1839 
1840  return std::move(func);
1841 }
TensorType
Memory type.
Definition: Types.h:38
Coordinates BiStrides
Bidirectional strides.
Definition: Types.h:51
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
Definition: Error.h:466
#define ARM_COMPUTE_LOG_GRAPH_INFO(x)
Definition: Logger.h:54
FloorUKernelPtr func
ScaleKernelInfo info(interpolation_policy, default_border_mode, PixelValue(), sampling_policy, false)

References ARM_COMPUTE_ERROR_ON, ARM_COMPUTE_LOG_GRAPH_INFO, StridedSliceLayerNode::ends(), func, arm_compute::test::validation::info, INode::input(), arm_compute::test::validation::input, INode::name(), INode::output(), StridedSliceLayerNode::starts(), StridedSliceLayerNode::strided_slice_info(), StridedSliceLayerNode::strides(), and StridedSliceLayerNode::type().

◆ create_unary_eltwise_layer()

std::unique_ptr<IFunction> arm_compute::graph::backends::detail::create_unary_eltwise_layer ( UnaryEltwiseLayerNode node)

Create a backend unary element-wise operation layer function.

Template Parameters
UnaryEltwiseFunctionsBackend unary element-wise function
TargetInfoTarget-specific information
Parameters
[in]nodeNode to create the backend function for
Returns
Backend unary element-wise operation layer function

Definition at line 910 of file FunctionHelpers.h.

911 {
912  validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
913 
914  // Extract IO and info
915  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
916  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
917  const UnaryEltwiseOperation eltwise_op = node.eltwise_descriptor().op;
918 
919  ARM_COMPUTE_ERROR_ON(input == nullptr);
920  ARM_COMPUTE_ERROR_ON(output == nullptr);
921 
922  std::unique_ptr<IFunction> func = nullptr;
923  std::string func_name;
924  if(eltwise_op == UnaryEltwiseOperation::Exp)
925  {
926  std::tie(func, func_name) = create_named_function<typename UnaryEltwiseFunctions::Exp>(
927  std::string("Exp"),
928  input, output);
929  }
930  else
931  {
932  ARM_COMPUTE_ERROR("Unsupported unary element-wise operation!");
933  }
934 
935  // Log info
936  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
937  << node.name()
938  << " Type: " << node.type()
939  << " Target: " << TargetInfo::TargetType
940  << " Operation: " << func_name
941  << " Data Type: " << input->info()->data_type()
942  << " Shape: " << input->info()->tensor_shape()
943  << std::endl);
944 
945  return std::move(func);
946 }
#define ARM_COMPUTE_ERROR(msg)
Print the given message then throw an std::runtime_error.
Definition: Error.h:352
TensorType
Memory type.
Definition: Types.h:38
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
Definition: Error.h:466
#define ARM_COMPUTE_LOG_GRAPH_INFO(x)
Definition: Logger.h:54
FloorUKernelPtr func
UnaryEltwiseOperation
Supported Unary Element-wise operations.
Definition: Types.h:118

References ARM_COMPUTE_ERROR, ARM_COMPUTE_ERROR_ON, ARM_COMPUTE_LOG_GRAPH_INFO, UnaryEltwiseLayerNode::eltwise_descriptor(), arm_compute::graph::Exp, func, INode::input(), arm_compute::test::validation::input, INode::name(), UnaryEltwiseLayerDescriptor::op, INode::output(), and UnaryEltwiseLayerNode::type().

◆ get_backing_tensor()

TargetInfo::TensorType* arm_compute::graph::backends::detail::get_backing_tensor ( arm_compute::graph::Tensor tensor)

Returns backing tensor of a given tensor.

Template Parameters
TargetInfoTarget information
Parameters
[in]tensorTensor to extract the backing tensor from
Returns
Backing tensor if present else nullptr

Definition at line 59 of file FunctionHelpers.h.

60 {
61  typename TargetInfo::TensorType *backing_tensor = nullptr;
62  if(tensor != nullptr)
63  {
64  ARM_COMPUTE_ERROR_ON(tensor->desc().target != TargetInfo::TargetType);
65  // Get backing tensor handle
66  ITensorHandle *tensor_handle = tensor->handle();
67  // Get backing tensor
68  backing_tensor = (tensor_handle != nullptr) ? arm_compute::utils::cast::polymorphic_cast<typename TargetInfo::TensorType *>(&tensor_handle->tensor()) : nullptr;
69  }
70 
71  return backing_tensor;
72 }
ITensorHandle * handle()
Backend tensor handle accessor.
Definition: Tensor.cpp:55
TensorType
Memory type.
Definition: Types.h:38
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
Definition: Error.h:466
TensorDescriptor & desc()
TensorInfo metadata accessor.
Definition: Tensor.cpp:40

References ARM_COMPUTE_ERROR_ON, Tensor::desc(), Tensor::handle(), and TensorDescriptor::target.

◆ get_backing_tensor_info()

arm_compute::ITensorInfo* arm_compute::graph::backends::detail::get_backing_tensor_info ( arm_compute::graph::Tensor tensor)
inline

Returns backing tensor info of a given tensor.

Parameters
[in]tensorTensor to extract the backing tensor from
Returns
Backing tensor info if present else nullptr

Definition at line 50 of file ValidateHelpers.h.

51 {
52  return ((tensor == nullptr) || (tensor->handle() == nullptr)) ? nullptr : tensor->handle()->tensor().info();
53 }
ITensorHandle * handle()
Backend tensor handle accessor.
Definition: Tensor.cpp:55
virtual arm_compute::ITensor & tensor()=0
Backend tensor object accessor.
virtual ITensorInfo * info() const =0
Interface to be implemented by the child class to return the tensor's metadata.

References Tensor::handle(), ITensor::info(), and ITensorHandle::tensor().

Referenced by validate_arg_min_max_layer(), validate_bounding_box_transform_layer(), validate_channel_shuffle_layer(), validate_convolution_layer(), validate_depth_to_space_layer(), validate_depthwise_convolution_layer(), validate_dequantization_layer(), validate_detection_output_layer(), validate_detection_post_process_layer(), validate_eltwise_Layer(), validate_generate_proposals_layer(), validate_l2_normalize_layer(), validate_normalize_planar_yuv_layer(), validate_pad_layer(), validate_permute_layer(), validate_prelu_layer(), validate_priorbox_layer(), validate_quantization_layer(), validate_reduction_operation_layer(), validate_reorg_layer(), validate_reshape_layer(), validate_roi_align_layer(), validate_slice_layer(), validate_strided_slice_layer(), and validate_unary_eltwise_layer().

◆ validate_arg_min_max_layer()

Status arm_compute::graph::backends::detail::validate_arg_min_max_layer ( ArgMinMaxLayerNode node)

Validates a ArgMinMax layer node.

Template Parameters
ArgMinMaxlayer function type
Parameters
[in]nodeNode to validate
Returns
Status

Definition at line 64 of file ValidateHelpers.h.

65 {
66  ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating ArgMinMaxLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
67  ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 1);
68  ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);
69 
70  // Extract IO and info
72  arm_compute::ITensorInfo *output = get_backing_tensor_info(node.output(0));
73 
74  // Validate function
75  return ArgMinMaxLayer::validate(input, node.axis(), output, node.reduction_operation());
76 }
arm_compute::ITensorInfo * get_backing_tensor_info(arm_compute::graph::Tensor *tensor)
Returns backing tensor info of a given tensor.
Store the tensor's metadata.
Definition: ITensorInfo.h:40
#define ARM_COMPUTE_RETURN_ERROR_ON(cond)
If the condition is true, an error is returned.
Definition: Error.h:296
#define ARM_COMPUTE_LOG_GRAPH_VERBOSE(x)
Definition: Logger.h:50
Status validate(const ITensorInfo *scores_in, const ITensorInfo *boxes_in, const ITensorInfo *batch_splits_in, const ITensorInfo *scores_out, const ITensorInfo *boxes_out, const ITensorInfo *classes, const ITensorInfo *batch_splits_out, const ITensorInfo *keeps, const ITensorInfo *keeps_size, const BoxNMSLimitInfo info)

References ARM_COMPUTE_LOG_GRAPH_VERBOSE, ARM_COMPUTE_RETURN_ERROR_ON, ArgMinMaxLayerNode::axis(), get_backing_tensor_info(), INode::id(), INode::input(), arm_compute::test::validation::input, INode::name(), INode::num_inputs(), INode::num_outputs(), INode::output(), ArgMinMaxLayerNode::reduction_operation(), and arm_compute::validate().

◆ validate_bounding_box_transform_layer()

Status arm_compute::graph::backends::detail::validate_bounding_box_transform_layer ( BoundingBoxTransformLayerNode node)

Validates a Bounding Box Transform layer node.

Template Parameters
BoundingBoxTransformLayerBounding Box Transform layer function type
Parameters
[in]nodeNode to validate
Returns
Status

Definition at line 87 of file ValidateHelpers.h.

88 {
89  ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating BoundingBoxTransformLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
90  ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 2);
91  ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);
92 
93  // Extract IO and info
95  arm_compute::ITensorInfo *deltas = get_backing_tensor_info(node.input(1));
96  arm_compute::ITensorInfo *output = get_backing_tensor_info(node.output(0));
97  const BoundingBoxTransformInfo bbox_info = node.info();
98 
99  return BoundingBoxTransformLayer::validate(input, output, deltas, bbox_info);
100 }
arm_compute::ITensorInfo * get_backing_tensor_info(arm_compute::graph::Tensor *tensor)
Returns backing tensor info of a given tensor.
Store the tensor's metadata.
Definition: ITensorInfo.h:40
#define ARM_COMPUTE_RETURN_ERROR_ON(cond)
If the condition is true, an error is returned.
Definition: Error.h:296
#define ARM_COMPUTE_LOG_GRAPH_VERBOSE(x)
Definition: Logger.h:50
Status validate(const ITensorInfo *scores_in, const ITensorInfo *boxes_in, const ITensorInfo *batch_splits_in, const ITensorInfo *scores_out, const ITensorInfo *boxes_out, const ITensorInfo *classes, const ITensorInfo *batch_splits_out, const ITensorInfo *keeps, const ITensorInfo *keeps_size, const BoxNMSLimitInfo info)

References ARM_COMPUTE_LOG_GRAPH_VERBOSE, ARM_COMPUTE_RETURN_ERROR_ON, get_backing_tensor_info(), INode::id(), BoundingBoxTransformLayerNode::info(), INode::input(), arm_compute::test::validation::input, INode::name(), INode::num_inputs(), INode::num_outputs(), INode::output(), and arm_compute::validate().

◆ validate_channel_shuffle_layer()

Status arm_compute::graph::backends::detail::validate_channel_shuffle_layer ( ChannelShuffleLayerNode node)

Validates a Channel Shuffle layer node.

Template Parameters
ChannelShuffleLayerChannel Shuffle layer function type
Parameters
[in]nodeNode to validate
Returns
Status

Definition at line 111 of file ValidateHelpers.h.

112 {
113  ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating ChannelShuffle node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
114  ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 1);
115  ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);
116 
117  // Extract IO and info
119  arm_compute::ITensorInfo *output = get_backing_tensor_info(node.output(0));
120  const unsigned int num_groups = node.num_groups();
121 
123 }
arm_compute::ITensorInfo * get_backing_tensor_info(arm_compute::graph::Tensor *tensor)
Returns backing tensor info of a given tensor.
Store the tensor's metadata.
Definition: ITensorInfo.h:40
#define ARM_COMPUTE_RETURN_ERROR_ON(cond)
If the condition is true, an error is returned.
Definition: Error.h:296
const unsigned int num_groups
Definition: Im2Col.cpp:153
#define ARM_COMPUTE_LOG_GRAPH_VERBOSE(x)
Definition: Logger.h:50
Status validate(const ITensorInfo *scores_in, const ITensorInfo *boxes_in, const ITensorInfo *batch_splits_in, const ITensorInfo *scores_out, const ITensorInfo *boxes_out, const ITensorInfo *classes, const ITensorInfo *batch_splits_out, const ITensorInfo *keeps, const ITensorInfo *keeps_size, const BoxNMSLimitInfo info)

References ARM_COMPUTE_LOG_GRAPH_VERBOSE, ARM_COMPUTE_RETURN_ERROR_ON, get_backing_tensor_info(), INode::id(), INode::input(), arm_compute::test::validation::input, INode::name(), ChannelShuffleLayerNode::num_groups(), arm_compute::test::validation::num_groups, INode::num_inputs(), INode::num_outputs(), INode::output(), and arm_compute::validate().

◆ validate_convolution_layer()

Status arm_compute::graph::backends::detail::validate_convolution_layer ( ConvolutionLayerNode node)

Validates a Convolution layer node.

Template Parameters
ConvolutionLayerDefault Convolution layer function type
DirectConvolutionLayerDirect Convolution layer function type
GEMMConvolutionLayerGEMM Convolution layer function type
WinogradConvolutionLayerWinograd Convolution layer function type
Parameters
[in]nodeNode to validate
Returns
Status

Definition at line 137 of file ValidateHelpers.h.

138 {
139  ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating ConvolutionLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
140  ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 3);
141  ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);
142 
143  // Extract IO and info
145  arm_compute::ITensorInfo *weights = get_backing_tensor_info(node.input(1));
146  arm_compute::ITensorInfo *biases = get_backing_tensor_info(node.input(2));
147  arm_compute::ITensorInfo *output = get_backing_tensor_info(node.output(0));
148 
149  if(is_data_type_quantized_asymmetric(input->data_type()))
150  {
151  biases->set_data_type(DataType::S32);
152  }
153 
154  const PadStrideInfo conv_info = node.convolution_info();
155  const ConvolutionMethod conv_algorithm = node.convolution_method();
156  const bool fast_math = node.fast_math_hint() == FastMathHint::Enabled;
157  const unsigned int num_groups = node.num_groups();
158 
159  // Validate function
160  Status status{};
161  switch(conv_algorithm)
162  {
163  case ConvolutionMethod::Direct:
164  ARM_COMPUTE_RETURN_ERROR_ON_MSG(num_groups != 1, "DirectConvolutionLayer does not support grouping!");
165  status = DirectConvolutionLayer::validate(input, weights, biases, output, conv_info);
166  break;
167  case ConvolutionMethod::GEMM:
168  status = GEMMConvolutionLayer::validate(input, weights, biases, output, conv_info,
169  WeightsInfo(), Size2D(1, 1), ActivationLayerInfo(), num_groups);
170  break;
171  case ConvolutionMethod::Winograd:
172  ARM_COMPUTE_RETURN_ERROR_ON_MSG(num_groups != 1, "WinogradConvolutionLayer does not support grouping!");
173  status = WinogradConvolutionLayer::validate(input, weights, biases, output, conv_info, ActivationLayerInfo(), fast_math);
174  break;
175  case ConvolutionMethod::Default:
176  status = ConvolutionLayer::validate(input, weights, biases, output, conv_info,
177  WeightsInfo(), Size2D(1, 1), ActivationLayerInfo(), fast_math, num_groups);
178  break;
179  default:
180  ARM_COMPUTE_RETURN_ERROR_MSG("Unsupported convolution method");
181  }
182 
183  return status;
184 }
arm_compute::ITensorInfo * get_backing_tensor_info(arm_compute::graph::Tensor *tensor)
Returns backing tensor info of a given tensor.
Store the tensor's metadata.
Definition: ITensorInfo.h:40
ConvolutionMethod
Available ConvolutionMethod.
Definition: Types.h:132
#define ARM_COMPUTE_RETURN_ERROR_ON(cond)
If the condition is true, an error is returned.
Definition: Error.h:296
const unsigned int num_groups
Definition: Im2Col.cpp:153
virtual ITensorInfo & set_data_type(DataType data_type)=0
Set the data type to the specified value.
bool is_data_type_quantized_asymmetric(DataType dt)
Check if a given data type is of asymmetric quantized type.
Definition: Utils.h:989
#define ARM_COMPUTE_RETURN_ERROR_MSG(...)
An error is returned with the given description.
Definition: Error.h:194
#define ARM_COMPUTE_LOG_GRAPH_VERBOSE(x)
Definition: Logger.h:50
#define ARM_COMPUTE_RETURN_ERROR_ON_MSG(cond, msg)
If the condition is true, an error is returned.
Definition: Error.h:244
Status validate(const ITensorInfo *scores_in, const ITensorInfo *boxes_in, const ITensorInfo *batch_splits_in, const ITensorInfo *scores_out, const ITensorInfo *boxes_out, const ITensorInfo *classes, const ITensorInfo *batch_splits_out, const ITensorInfo *keeps, const ITensorInfo *keeps_size, const BoxNMSLimitInfo info)

References ARM_COMPUTE_LOG_GRAPH_VERBOSE, ARM_COMPUTE_RETURN_ERROR_MSG, ARM_COMPUTE_RETURN_ERROR_ON, ARM_COMPUTE_RETURN_ERROR_ON_MSG, arm_compute::test::validation::conv_info, ConvolutionLayerNode::convolution_info(), ConvolutionLayerNode::convolution_method(), arm_compute::graph::Default, arm_compute::graph::Direct, arm_compute::graph::Enabled, ConvolutionLayerNode::fast_math_hint(), arm_compute::graph::GEMM, get_backing_tensor_info(), INode::id(), INode::input(), arm_compute::test::validation::input, arm_compute::is_data_type_quantized_asymmetric(), INode::name(), ConvolutionLayerNode::num_groups(), arm_compute::test::validation::num_groups, INode::num_inputs(), INode::num_outputs(), INode::output(), arm_compute::S32, ITensorInfo::set_data_type(), arm_compute::validate(), and arm_compute::graph::Winograd.

Referenced by CLNodeValidator::validate(), and NENodeValidator::validate().

◆ validate_depth_to_space_layer()

Status arm_compute::graph::backends::detail::validate_depth_to_space_layer ( DepthToSpaceLayerNode node)

Validates a depth to space layer node.

Template Parameters
DepthToSpaceLayer Depth to Space layer type
Parameters
[in]nodeNode to validate
Returns
Status

Definition at line 234 of file ValidateHelpers.h.

235 {
236  ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating DepthToSpaceLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
237  ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 1);
238  ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);
239 
240  // Extract IO and info
242  arm_compute::ITensorInfo *output = get_backing_tensor_info(node.output(0));
243 
244  return DepthToSpaceLayer::validate(input, output, node.block_shape());
245 }
arm_compute::ITensorInfo * get_backing_tensor_info(arm_compute::graph::Tensor *tensor)
Returns backing tensor info of a given tensor.
Store the tensor's metadata.
Definition: ITensorInfo.h:40
#define ARM_COMPUTE_RETURN_ERROR_ON(cond)
If the condition is true, an error is returned.
Definition: Error.h:296
#define ARM_COMPUTE_LOG_GRAPH_VERBOSE(x)
Definition: Logger.h:50
Status validate(const ITensorInfo *scores_in, const ITensorInfo *boxes_in, const ITensorInfo *batch_splits_in, const ITensorInfo *scores_out, const ITensorInfo *boxes_out, const ITensorInfo *classes, const ITensorInfo *batch_splits_out, const ITensorInfo *keeps, const ITensorInfo *keeps_size, const BoxNMSLimitInfo info)

References ARM_COMPUTE_LOG_GRAPH_VERBOSE, ARM_COMPUTE_RETURN_ERROR_ON, DepthToSpaceLayerNode::block_shape(), get_backing_tensor_info(), INode::id(), INode::input(), arm_compute::test::validation::input, INode::name(), INode::num_inputs(), INode::num_outputs(), INode::output(), and arm_compute::validate().

◆ validate_depthwise_convolution_layer()

Status arm_compute::graph::backends::detail::validate_depthwise_convolution_layer ( DepthwiseConvolutionLayerNode node)

Validates a Depthwise Convolution layer node.

Template Parameters
DepthwiseConvolutionLayerDefault Depthwise Convolution layer type
Parameters
[in]nodeNode to validate
Returns
Status

Definition at line 195 of file ValidateHelpers.h.

196 {
197  ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating DepthwiseConvolutionLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
198  ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 3);
199  ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);
200 
201  // Extract IO and info
204  arm_compute::ITensorInfo *biases = get_backing_tensor_info(node.input(2));
205  arm_compute::ITensorInfo *output = get_backing_tensor_info(node.output(0));
206 
207  const PadStrideInfo conv_info = node.convolution_info();
208  const DepthwiseConvolutionMethod dwc_algorithm = node.depthwise_convolution_method();
209  const int depth_multiplier = node.depth_multiplier();
210 
211  // Validate function
212  Status status{};
213  switch(dwc_algorithm)
214  {
215  case DepthwiseConvolutionMethod::Default:
216  case DepthwiseConvolutionMethod::Optimized3x3:
217  status = DepthwiseConvolutionLayer::validate(input, weights, biases, output, conv_info, depth_multiplier);
218  break;
219  default:
220  ARM_COMPUTE_RETURN_ERROR_MSG("Unsupported depthwise convolution method");
221  }
222 
223  return status;
224 }
arm_compute::ITensorInfo * get_backing_tensor_info(arm_compute::graph::Tensor *tensor)
Returns backing tensor info of a given tensor.
Store the tensor's metadata.
Definition: ITensorInfo.h:40
#define ARM_COMPUTE_RETURN_ERROR_ON(cond)
If the condition is true, an error is returned.
Definition: Error.h:296
DepthwiseConvolutionMethod
Supported Depthwise Convolution layer methods.
Definition: Types.h:133
#define ARM_COMPUTE_RETURN_ERROR_MSG(...)
An error is returned with the given description.
Definition: Error.h:194
#define ARM_COMPUTE_LOG_GRAPH_VERBOSE(x)
Definition: Logger.h:50
Status validate(const ITensorInfo *scores_in, const ITensorInfo *boxes_in, const ITensorInfo *batch_splits_in, const ITensorInfo *scores_out, const ITensorInfo *boxes_out, const ITensorInfo *classes, const ITensorInfo *batch_splits_out, const ITensorInfo *keeps, const ITensorInfo *keeps_size, const BoxNMSLimitInfo info)

References ARM_COMPUTE_LOG_GRAPH_VERBOSE, ARM_COMPUTE_RETURN_ERROR_MSG, ARM_COMPUTE_RETURN_ERROR_ON, arm_compute::test::validation::conv_info, DepthwiseConvolutionLayerNode::convolution_info(), arm_compute::graph::Default, DepthwiseConvolutionLayerNode::depth_multiplier(), DepthwiseConvolutionLayerNode::depthwise_convolution_method(), get_backing_tensor_info(), INode::id(), INode::input(), arm_compute::test::validation::input, INode::name(), INode::num_inputs(), INode::num_outputs(), arm_compute::graph::Optimized3x3, INode::output(), and arm_compute::validate().

◆ validate_dequantization_layer()

Status arm_compute::graph::backends::detail::validate_dequantization_layer ( DequantizationLayerNode node)

Validates a dequantize layer node.

Template Parameters
DequantizationLayerDequantize layer type
Parameters
[in]nodeNode to validate
Returns
Status

Definition at line 255 of file ValidateHelpers.h.

256 {
257  ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating DequantizationLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
258  ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 1);
259  ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);
260 
261  // Extract IO and info
263  arm_compute::ITensorInfo *output = get_backing_tensor_info(node.output(0));
264 
265  return DequantizationLayer::validate(input, output);
266 }
arm_compute::ITensorInfo * get_backing_tensor_info(arm_compute::graph::Tensor *tensor)
Returns backing tensor info of a given tensor.
Store the tensor's metadata.
Definition: ITensorInfo.h:40
#define ARM_COMPUTE_RETURN_ERROR_ON(cond)
If the condition is true, an error is returned.
Definition: Error.h:296
#define ARM_COMPUTE_LOG_GRAPH_VERBOSE(x)
Definition: Logger.h:50
Status validate(const ITensorInfo *scores_in, const ITensorInfo *boxes_in, const ITensorInfo *batch_splits_in, const ITensorInfo *scores_out, const ITensorInfo *boxes_out, const ITensorInfo *classes, const ITensorInfo *batch_splits_out, const ITensorInfo *keeps, const ITensorInfo *keeps_size, const BoxNMSLimitInfo info)

References ARM_COMPUTE_LOG_GRAPH_VERBOSE, ARM_COMPUTE_RETURN_ERROR_ON, get_backing_tensor_info(), INode::id(), INode::input(), arm_compute::test::validation::input, INode::name(), INode::num_inputs(), INode::num_outputs(), INode::output(), and arm_compute::validate().

◆ validate_detection_output_layer()

Status arm_compute::graph::backends::detail::validate_detection_output_layer ( DetectionOutputLayerNode node)

Validates a detection output layer node.

Template Parameters
DetectionOutputLayerDetectionOutput layer type
Parameters
[in]nodeNode to validate
Returns
Status

Definition at line 276 of file ValidateHelpers.h.

277 {
278  ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating DetectionOutputLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
279  ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 3);
280  ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);
281 
282  // Extract IO and info
283  arm_compute::ITensorInfo *input0 = get_backing_tensor_info(node.input(0));
284  arm_compute::ITensorInfo *input1 = get_backing_tensor_info(node.input(1));
285  arm_compute::ITensorInfo *input2 = get_backing_tensor_info(node.input(2));
286  arm_compute::ITensorInfo *output = get_backing_tensor_info(node.output(0));
287  const DetectionOutputLayerInfo detect_info = node.detection_output_info();
288 
289  return DetectionOutputLayer::validate(input0, input1, input2, output, detect_info);
290 }
arm_compute::ITensorInfo * get_backing_tensor_info(arm_compute::graph::Tensor *tensor)
Returns backing tensor info of a given tensor.
Store the tensor's metadata.
Definition: ITensorInfo.h:40
#define ARM_COMPUTE_RETURN_ERROR_ON(cond)
If the condition is true, an error is returned.
Definition: Error.h:296
#define ARM_COMPUTE_LOG_GRAPH_VERBOSE(x)
Definition: Logger.h:50
Status validate(const ITensorInfo *scores_in, const ITensorInfo *boxes_in, const ITensorInfo *batch_splits_in, const ITensorInfo *scores_out, const ITensorInfo *boxes_out, const ITensorInfo *classes, const ITensorInfo *batch_splits_out, const ITensorInfo *keeps, const ITensorInfo *keeps_size, const BoxNMSLimitInfo info)

References ARM_COMPUTE_LOG_GRAPH_VERBOSE, ARM_COMPUTE_RETURN_ERROR_ON, DetectionOutputLayerNode::detection_output_info(), get_backing_tensor_info(), INode::id(), INode::input(), INode::name(), INode::num_inputs(), INode::num_outputs(), INode::output(), and arm_compute::validate().

◆ validate_detection_post_process_layer()

Status arm_compute::graph::backends::detail::validate_detection_post_process_layer ( DetectionPostProcessLayerNode node)

Validates a detection post process layer node.

Template Parameters
DetectionPostProcessLayer DetectionPostProcess layer type
Parameters
[in]nodeNode to validate
Returns
Status

Definition at line 300 of file ValidateHelpers.h.

301 {
302  ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating DetectionPostProcessLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
303  ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 3);
304  ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 4);
305 
306  // Extract IO and info
307  arm_compute::ITensorInfo *input0 = get_backing_tensor_info(node.input(0));
308  arm_compute::ITensorInfo *input1 = get_backing_tensor_info(node.input(1));
309  arm_compute::ITensorInfo *input2 = get_backing_tensor_info(node.input(2));
310  arm_compute::ITensorInfo *output0 = get_backing_tensor_info(node.output(0));
311  arm_compute::ITensorInfo *output1 = get_backing_tensor_info(node.output(1));
312  arm_compute::ITensorInfo *output2 = get_backing_tensor_info(node.output(2));
313  arm_compute::ITensorInfo *output3 = get_backing_tensor_info(node.output(3));
314  const DetectionPostProcessLayerInfo detect_info = node.detection_post_process_info();
315 
316  return DetectionPostProcessLayer::validate(input0, input1, input2, output0, output1, output2, output3, detect_info);
317 }
arm_compute::ITensorInfo * get_backing_tensor_info(arm_compute::graph::Tensor *tensor)
Returns backing tensor info of a given tensor.
Store the tensor's metadata.
Definition: ITensorInfo.h:40
#define ARM_COMPUTE_RETURN_ERROR_ON(cond)
If the condition is true, an error is returned.
Definition: Error.h:296
#define ARM_COMPUTE_LOG_GRAPH_VERBOSE(x)
Definition: Logger.h:50
Status validate(const ITensorInfo *scores_in, const ITensorInfo *boxes_in, const ITensorInfo *batch_splits_in, const ITensorInfo *scores_out, const ITensorInfo *boxes_out, const ITensorInfo *classes, const ITensorInfo *batch_splits_out, const ITensorInfo *keeps, const ITensorInfo *keeps_size, const BoxNMSLimitInfo info)

References ARM_COMPUTE_LOG_GRAPH_VERBOSE, ARM_COMPUTE_RETURN_ERROR_ON, DetectionPostProcessLayerNode::detection_post_process_info(), get_backing_tensor_info(), INode::id(), INode::input(), INode::name(), INode::num_inputs(), INode::num_outputs(), INode::output(), and arm_compute::validate().

◆ validate_eltwise_Layer()

Status arm_compute::graph::backends::detail::validate_eltwise_Layer ( EltwiseLayerNode node)

Validates a element-wise layer node.

Parameters
[in]nodeNode to validate
Returns
Status

Definition at line 664 of file ValidateHelpers.h.

665 {
666  ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating EltwiseLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
667  ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 2);
668  ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);
669 
670  // Extract input and output
671  const arm_compute::ITensorInfo *input1 = detail::get_backing_tensor_info(node.input(0));
672  const arm_compute::ITensorInfo *input2 = detail::get_backing_tensor_info(node.input(1));
673  const arm_compute::ITensorInfo *output = get_backing_tensor_info(node.output(0));
674  const EltwiseOperation eltwise_op = node.eltwise_operation();
675  const ConvertPolicy convert_policy = node.convert_policy();
676  const RoundingPolicy round_policy = node.rounding_policy();
677  const ActivationLayerInfo act_info = node.fused_activation();
678  const QuantizationInfo quant_info = node.output_quant_info();
679 
680  // Validate function
681  if(eltwise_op == EltwiseOperation::Add)
682  {
683  return EltwiseLayerFunctions::ArithmeticAddition::validate(input1, input2, output, convert_policy, act_info);
684  }
685  else if(eltwise_op == EltwiseOperation::Sub)
686  {
687  return EltwiseLayerFunctions::ArithmeticSubtraction::validate(input1, input2, output, convert_policy, act_info);
688  }
689  else if(eltwise_op == EltwiseOperation::Mul)
690  {
691  return EltwiseLayerFunctions::PixelWiseMultiplication::validate(input1, input2, output, 1.0f, convert_policy, round_policy, act_info);
692  }
693  else if(eltwise_op == EltwiseOperation::Max)
694  {
695  return EltwiseLayerFunctions::ElementwiseMax::validate(input1, input2, output, act_info);
696  }
697  else if(eltwise_op == EltwiseOperation::Div)
698  {
699  return EltwiseLayerFunctions::ArithmeticDivision::validate(input1, input2, output, act_info);
700  }
701  else
702  {
703  ARM_COMPUTE_ERROR("Unsupported element-wise operation!");
704  }
705  return Status{};
706 }
EltwiseOperation
Supported Element-wise operations.
Definition: Types.h:108
arm_compute::ITensorInfo * get_backing_tensor_info(arm_compute::graph::Tensor *tensor)
Returns backing tensor info of a given tensor.
#define ARM_COMPUTE_ERROR(msg)
Print the given message then throw an std::runtime_error.
Definition: Error.h:352
Store the tensor's metadata.
Definition: ITensorInfo.h:40
#define ARM_COMPUTE_RETURN_ERROR_ON(cond)
If the condition is true, an error is returned.
Definition: Error.h:296
RoundingPolicy
Rounding method.
Definition: Rounding.h:30
#define ARM_COMPUTE_LOG_GRAPH_VERBOSE(x)
Definition: Logger.h:50
ConvertPolicy
Policy to handle overflow.
Definition: Types.h:385
Status validate(const ITensorInfo *scores_in, const ITensorInfo *boxes_in, const ITensorInfo *batch_splits_in, const ITensorInfo *scores_out, const ITensorInfo *boxes_out, const ITensorInfo *classes, const ITensorInfo *batch_splits_out, const ITensorInfo *keeps, const ITensorInfo *keeps_size, const BoxNMSLimitInfo info)

References arm_compute::graph::Add, ARM_COMPUTE_ERROR, ARM_COMPUTE_LOG_GRAPH_VERBOSE, ARM_COMPUTE_RETURN_ERROR_ON, EltwiseLayerNode::convert_policy(), arm_compute::graph::Div, EltwiseLayerNode::eltwise_operation(), EltwiseLayerNode::fused_activation(), get_backing_tensor_info(), INode::id(), INode::input(), arm_compute::graph::Max, arm_compute::graph::Mul, INode::name(), INode::num_inputs(), INode::num_outputs(), INode::output(), EltwiseLayerNode::output_quant_info(), EltwiseLayerNode::rounding_policy(), arm_compute::graph::Sub, and arm_compute::validate().

◆ validate_generate_proposals_layer()

Status arm_compute::graph::backends::detail::validate_generate_proposals_layer ( GenerateProposalsLayerNode node)

Validates a Generate Proposals layer node.

Template Parameters
GenerateProposalsLayerGenerate Proposals layer type
Parameters
[in]nodeNode to validate
Returns
Status

Definition at line 328 of file ValidateHelpers.h.

329 {
330  ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating GenerateProposalsLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
331  ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 3);
332  ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 3);
333 
334  // Extract IO and info
338  arm_compute::ITensorInfo *proposals = get_backing_tensor_info(node.output(0));
339  arm_compute::ITensorInfo *scores_out = get_backing_tensor_info(node.output(1));
340  arm_compute::ITensorInfo *num_valid_proposals = get_backing_tensor_info(node.output(2));
341  const GenerateProposalsInfo info = node.info();
342 
343  return GenerateProposalsLayer::validate(scores, deltas, anchors, proposals, scores_out, num_valid_proposals, info);
344 }
arm_compute::ITensorInfo * get_backing_tensor_info(arm_compute::graph::Tensor *tensor)
Returns backing tensor info of a given tensor.
Store the tensor's metadata.
Definition: ITensorInfo.h:40
#define ARM_COMPUTE_RETURN_ERROR_ON(cond)
If the condition is true, an error is returned.
Definition: Error.h:296
ScaleKernelInfo info(interpolation_policy, default_border_mode, PixelValue(), sampling_policy, false)
#define ARM_COMPUTE_LOG_GRAPH_VERBOSE(x)
Definition: Logger.h:50
Status validate(const ITensorInfo *scores_in, const ITensorInfo *boxes_in, const ITensorInfo *batch_splits_in, const ITensorInfo *scores_out, const ITensorInfo *boxes_out, const ITensorInfo *classes, const ITensorInfo *batch_splits_out, const ITensorInfo *keeps, const ITensorInfo *keeps_size, const BoxNMSLimitInfo info)

References ARM_COMPUTE_LOG_GRAPH_VERBOSE, ARM_COMPUTE_RETURN_ERROR_ON, get_backing_tensor_info(), INode::id(), GenerateProposalsLayerNode::info(), arm_compute::test::validation::info, INode::input(), INode::name(), INode::num_inputs(), INode::num_outputs(), INode::output(), and arm_compute::validate().

◆ validate_l2_normalize_layer()

Status arm_compute::graph::backends::detail::validate_l2_normalize_layer ( L2NormalizeLayerNode node)

Validates a L2Normalization layer node.

Template Parameters
L2Normalization layer type
Parameters
[in]nodeNode to validate
Returns
Status

Definition at line 355 of file ValidateHelpers.h.

356 {
357  ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating L2NormalizeLayerNode node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
358  ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 1);
359  ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);
360 
361  // Extract IO and info
363  arm_compute::ITensorInfo *output = get_backing_tensor_info(node.output(0));
364  int axis = node.axis();
365  float epsilon = node.epsilon();
366 
367  // Validate function
368  return L2NormalizeLayer::validate(input, output, axis, epsilon);
369 }
arm_compute::ITensorInfo * get_backing_tensor_info(arm_compute::graph::Tensor *tensor)
Returns backing tensor info of a given tensor.
Store the tensor's metadata.
Definition: ITensorInfo.h:40
#define ARM_COMPUTE_RETURN_ERROR_ON(cond)
If the condition is true, an error is returned.
Definition: Error.h:296
#define ARM_COMPUTE_LOG_GRAPH_VERBOSE(x)
Definition: Logger.h:50
Status validate(const ITensorInfo *scores_in, const ITensorInfo *boxes_in, const ITensorInfo *batch_splits_in, const ITensorInfo *scores_out, const ITensorInfo *boxes_out, const ITensorInfo *classes, const ITensorInfo *batch_splits_out, const ITensorInfo *keeps, const ITensorInfo *keeps_size, const BoxNMSLimitInfo info)

References ARM_COMPUTE_LOG_GRAPH_VERBOSE, ARM_COMPUTE_RETURN_ERROR_ON, L2NormalizeLayerNode::axis(), arm_compute::quantization::epsilon, L2NormalizeLayerNode::epsilon(), get_backing_tensor_info(), INode::id(), INode::input(), arm_compute::test::validation::input, INode::name(), INode::num_inputs(), INode::num_outputs(), INode::output(), and arm_compute::validate().

◆ validate_node()

void arm_compute::graph::backends::detail::validate_node ( const INode node,
size_t  num_expected_inputs,
size_t  num_expected_outputs 
)

Definition at line 75 of file FunctionHelpers.h.

76 {
77  ARM_COMPUTE_LOG_GRAPH_VERBOSE("Creating " << node.type()
78  << " Target: " << TargetInfo::TargetType
79  << " ID: " << node.id()
80  << node.name()
81  << std::endl);
82 
83  ARM_COMPUTE_ERROR_ON(TargetInfo::TargetType != node.assigned_target());
84  ARM_COMPUTE_ERROR_ON(node.num_inputs() != num_expected_inputs);
85  ARM_COMPUTE_ERROR_ON(node.num_outputs() != num_expected_outputs);
86  ARM_COMPUTE_UNUSED(node, num_expected_inputs, num_expected_outputs);
87 }
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
Definition: Error.h:466
#define ARM_COMPUTE_UNUSED(...)
To avoid unused variables warnings.
Definition: Error.h:152
#define ARM_COMPUTE_LOG_GRAPH_VERBOSE(x)
Definition: Logger.h:50

References ARM_COMPUTE_ERROR_ON, ARM_COMPUTE_LOG_GRAPH_VERBOSE, ARM_COMPUTE_UNUSED, INode::assigned_target(), INode::id(), INode::name(), INode::num_inputs(), INode::num_outputs(), and INode::type().

◆ validate_normalize_planar_yuv_layer()

Status arm_compute::graph::backends::detail::validate_normalize_planar_yuv_layer ( NormalizePlanarYUVLayerNode node)

Validates a NormalizePlanarYUV layer node.

Template Parameters
NormalizePlanarYUVLayer layer type
Parameters
[in]nodeNode to validate
Returns
Status

Definition at line 380 of file ValidateHelpers.h.

381 {
382  ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating NormalizePlanarYUVLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
383  ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 3);
384  ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);
385 
386  // Extract IO and info
390  arm_compute::ITensorInfo *output = get_backing_tensor_info(node.output(0));
391 
392  // Validate function
393  return NormalizePlanarYUVLayer::validate(input, output, mean, std);
394 }
arm_compute::ITensorInfo * get_backing_tensor_info(arm_compute::graph::Tensor *tensor)
Returns backing tensor info of a given tensor.
Store the tensor's metadata.
Definition: ITensorInfo.h:40
#define ARM_COMPUTE_RETURN_ERROR_ON(cond)
If the condition is true, an error is returned.
Definition: Error.h:296
#define ARM_COMPUTE_LOG_GRAPH_VERBOSE(x)
Definition: Logger.h:50
Status validate(const ITensorInfo *scores_in, const ITensorInfo *boxes_in, const ITensorInfo *batch_splits_in, const ITensorInfo *scores_out, const ITensorInfo *boxes_out, const ITensorInfo *classes, const ITensorInfo *batch_splits_out, const ITensorInfo *keeps, const ITensorInfo *keeps_size, const BoxNMSLimitInfo info)

References ARM_COMPUTE_LOG_GRAPH_VERBOSE, ARM_COMPUTE_RETURN_ERROR_ON, get_backing_tensor_info(), INode::id(), INode::input(), arm_compute::test::validation::input, INode::name(), INode::num_inputs(), INode::num_outputs(), INode::output(), and arm_compute::validate().

◆ validate_pad_layer()

Status arm_compute::graph::backends::detail::validate_pad_layer ( PadLayerNode node)

Validates a pad layer node.

Template Parameters
PadLayerPad layer type
Parameters
[in]nodeNode to validate
Returns
Status

Definition at line 405 of file ValidateHelpers.h.

406 {
407  ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating PadLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
408  ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 1);
409  ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);
410 
411  // Extract IO and info
413  arm_compute::ITensorInfo *output = get_backing_tensor_info(node.output(0));
414  const PaddingList &padding = node.padding();
415 
416  return PadLayer::validate(input, output, padding);
417 }
std::vector< PaddingInfo > PaddingList
List of padding information.
Definition: Types.h:434
arm_compute::ITensorInfo * get_backing_tensor_info(arm_compute::graph::Tensor *tensor)
Returns backing tensor info of a given tensor.
Store the tensor's metadata.
Definition: ITensorInfo.h:40
#define ARM_COMPUTE_RETURN_ERROR_ON(cond)
If the condition is true, an error is returned.
Definition: Error.h:296
#define ARM_COMPUTE_LOG_GRAPH_VERBOSE(x)
Definition: Logger.h:50
Status validate(const ITensorInfo *scores_in, const ITensorInfo *boxes_in, const ITensorInfo *batch_splits_in, const ITensorInfo *scores_out, const ITensorInfo *boxes_out, const ITensorInfo *classes, const ITensorInfo *batch_splits_out, const ITensorInfo *keeps, const ITensorInfo *keeps_size, const BoxNMSLimitInfo info)

References ARM_COMPUTE_LOG_GRAPH_VERBOSE, ARM_COMPUTE_RETURN_ERROR_ON, get_backing_tensor_info(), INode::id(), INode::input(), arm_compute::test::validation::input, INode::name(), INode::num_inputs(), INode::num_outputs(), INode::output(), PadLayerNode::padding(), and arm_compute::validate().

◆ validate_permute_layer()

Status arm_compute::graph::backends::detail::validate_permute_layer ( PermuteLayerNode node)

Validates a permute layer node.

Template Parameters
PermuteLayerPermute layer type
Parameters
[in]nodeNode to validate
Returns
Status

Definition at line 428 of file ValidateHelpers.h.

429 {
430  ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating PermuteLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
431  ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 1);
432  ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);
433 
434  // Extract IO and info
436  arm_compute::ITensorInfo *output = get_backing_tensor_info(node.output(0));
437  const PermutationVector &perm = node.permutation_vector();
438 
439  return PermuteLayer::validate(input, output, perm);
440 }
arm_compute::ITensorInfo * get_backing_tensor_info(arm_compute::graph::Tensor *tensor)
Returns backing tensor info of a given tensor.
Strides PermutationVector
Permutation vector.
Definition: Types.h:49
Store the tensor's metadata.
Definition: ITensorInfo.h:40
#define ARM_COMPUTE_RETURN_ERROR_ON(cond)
If the condition is true, an error is returned.
Definition: Error.h:296
#define ARM_COMPUTE_LOG_GRAPH_VERBOSE(x)
Definition: Logger.h:50
Status validate(const ITensorInfo *scores_in, const ITensorInfo *boxes_in, const ITensorInfo *batch_splits_in, const ITensorInfo *scores_out, const ITensorInfo *boxes_out, const ITensorInfo *classes, const ITensorInfo *batch_splits_out, const ITensorInfo *keeps, const ITensorInfo *keeps_size, const BoxNMSLimitInfo info)

References ARM_COMPUTE_LOG_GRAPH_VERBOSE, ARM_COMPUTE_RETURN_ERROR_ON, get_backing_tensor_info(), INode::id(), INode::input(), arm_compute::test::validation::input, INode::name(), INode::num_inputs(), INode::num_outputs(), INode::output(), PermuteLayerNode::permutation_vector(), and arm_compute::validate().

◆ validate_prelu_layer()

Status arm_compute::graph::backends::detail::validate_prelu_layer ( PReluLayerNode node)

Validates a PRelu layer node.

Template Parameters
PReluLayerPRelu layer type
Parameters
[in]nodeNode to validate
Returns
Status

Definition at line 451 of file ValidateHelpers.h.

452 {
453  ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating PRelu node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
454  ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 2);
455  ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);
456 
457  // Extract IO and info
459  arm_compute::ITensorInfo *alpha = get_backing_tensor_info(node.input(1));
460  arm_compute::ITensorInfo *output = get_backing_tensor_info(node.output(0));
461 
462  return PReluLayer::validate(input, alpha, output);
463 }
arm_compute::ITensorInfo * get_backing_tensor_info(arm_compute::graph::Tensor *tensor)
Returns backing tensor info of a given tensor.
Store the tensor's metadata.
Definition: ITensorInfo.h:40
#define ARM_COMPUTE_RETURN_ERROR_ON(cond)
If the condition is true, an error is returned.
Definition: Error.h:296
#define ARM_COMPUTE_LOG_GRAPH_VERBOSE(x)
Definition: Logger.h:50
Status validate(const ITensorInfo *scores_in, const ITensorInfo *boxes_in, const ITensorInfo *batch_splits_in, const ITensorInfo *scores_out, const ITensorInfo *boxes_out, const ITensorInfo *classes, const ITensorInfo *batch_splits_out, const ITensorInfo *keeps, const ITensorInfo *keeps_size, const BoxNMSLimitInfo info)

References ARM_COMPUTE_LOG_GRAPH_VERBOSE, ARM_COMPUTE_RETURN_ERROR_ON, get_backing_tensor_info(), INode::id(), INode::input(), arm_compute::test::validation::input, INode::name(), INode::num_inputs(), INode::num_outputs(), INode::output(), and arm_compute::validate().

◆ validate_priorbox_layer()

Status arm_compute::graph::backends::detail::validate_priorbox_layer ( PriorBoxLayerNode node)

Validates a priorbox layer node.

Template Parameters
PriorBoxLayer — PriorBox layer type
Parameters
[in] node — Node to validate
Returns
Status

Definition at line 474 of file ValidateHelpers.h.

475 {
476  ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating PriorBoxLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
477  ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 2);
478  ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);
479 
480  // Extract IO and info
481  arm_compute::ITensorInfo *input0 = get_backing_tensor_info(node.input(0));
482  arm_compute::ITensorInfo *input1 = get_backing_tensor_info(node.input(1));
483  arm_compute::ITensorInfo *output = get_backing_tensor_info(node.output(0));
484  const PriorBoxLayerInfo prior_info = node.priorbox_info();
485 
486  return PriorBoxLayer::validate(input0, input1, output, prior_info);
487 }
arm_compute::ITensorInfo * get_backing_tensor_info(arm_compute::graph::Tensor *tensor)
Returns backing tensor info of a given tensor.
Store the tensor's metadata.
Definition: ITensorInfo.h:40
#define ARM_COMPUTE_RETURN_ERROR_ON(cond)
If the condition is true, an error is returned.
Definition: Error.h:296
#define ARM_COMPUTE_LOG_GRAPH_VERBOSE(x)
Definition: Logger.h:50
Status validate(const ITensorInfo *scores_in, const ITensorInfo *boxes_in, const ITensorInfo *batch_splits_in, const ITensorInfo *scores_out, const ITensorInfo *boxes_out, const ITensorInfo *classes, const ITensorInfo *batch_splits_out, const ITensorInfo *keeps, const ITensorInfo *keeps_size, const BoxNMSLimitInfo info)

References ARM_COMPUTE_LOG_GRAPH_VERBOSE, ARM_COMPUTE_RETURN_ERROR_ON, get_backing_tensor_info(), INode::id(), INode::input(), INode::name(), INode::num_inputs(), INode::num_outputs(), INode::output(), PriorBoxLayerNode::priorbox_info(), and arm_compute::validate().

◆ validate_quantization_layer()

Status arm_compute::graph::backends::detail::validate_quantization_layer ( QuantizationLayerNode node)

Validates a Quantization layer node.

Template Parameters
QuantizationLayer — Quantization layer type
Parameters
[in] node — Node to validate
Returns
Status

Definition at line 498 of file ValidateHelpers.h.

499 {
500  ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating QuantizationLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
501  ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 1);
502  ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);
503 
504  // Extract input and output
506  arm_compute::ITensorInfo *output = get_backing_tensor_info(node.output(0));
507 
508  // Validate function
509  return QuantizationLayer::validate(input, output);
510 }
arm_compute::ITensorInfo * get_backing_tensor_info(arm_compute::graph::Tensor *tensor)
Returns backing tensor info of a given tensor.
Store the tensor's metadata.
Definition: ITensorInfo.h:40
#define ARM_COMPUTE_RETURN_ERROR_ON(cond)
If the condition is true, an error is returned.
Definition: Error.h:296
#define ARM_COMPUTE_LOG_GRAPH_VERBOSE(x)
Definition: Logger.h:50
Status validate(const ITensorInfo *scores_in, const ITensorInfo *boxes_in, const ITensorInfo *batch_splits_in, const ITensorInfo *scores_out, const ITensorInfo *boxes_out, const ITensorInfo *classes, const ITensorInfo *batch_splits_out, const ITensorInfo *keeps, const ITensorInfo *keeps_size, const BoxNMSLimitInfo info)

References ARM_COMPUTE_LOG_GRAPH_VERBOSE, ARM_COMPUTE_RETURN_ERROR_ON, get_backing_tensor_info(), INode::id(), INode::input(), arm_compute::test::validation::input, INode::name(), INode::num_inputs(), INode::num_outputs(), INode::output(), and arm_compute::validate().

◆ validate_reduction_operation_layer()

Status arm_compute::graph::backends::detail::validate_reduction_operation_layer ( ReductionLayerNode node)

Validates a Reduction operation layer node.

Template Parameters
ReductionLayer — Reduction layer type
Parameters
[in] node — Node to validate
Returns
Status

Definition at line 521 of file ValidateHelpers.h.

522 {
523  ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating ReductionLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
524 
525  ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 1);
526  ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);
527 
528  // Extract input and output
530  arm_compute::ITensorInfo *output = get_backing_tensor_info(node.output(0));
531 
532  // Validate function
533  return ReductionLayer::validate(input, output, node.axis(), node.op(), node.keep_dims());
534 }
arm_compute::ITensorInfo * get_backing_tensor_info(arm_compute::graph::Tensor *tensor)
Returns backing tensor info of a given tensor.
Store the tensor's metadata.
Definition: ITensorInfo.h:40
#define ARM_COMPUTE_RETURN_ERROR_ON(cond)
If the condition is true, an error is returned.
Definition: Error.h:296
#define ARM_COMPUTE_LOG_GRAPH_VERBOSE(x)
Definition: Logger.h:50
Status validate(const ITensorInfo *scores_in, const ITensorInfo *boxes_in, const ITensorInfo *batch_splits_in, const ITensorInfo *scores_out, const ITensorInfo *boxes_out, const ITensorInfo *classes, const ITensorInfo *batch_splits_out, const ITensorInfo *keeps, const ITensorInfo *keeps_size, const BoxNMSLimitInfo info)

References ARM_COMPUTE_LOG_GRAPH_VERBOSE, ARM_COMPUTE_RETURN_ERROR_ON, ReductionLayerNode::axis(), get_backing_tensor_info(), INode::id(), INode::input(), arm_compute::test::validation::input, ReductionLayerNode::keep_dims(), INode::name(), INode::num_inputs(), INode::num_outputs(), ReductionLayerNode::op(), INode::output(), and arm_compute::validate().

◆ validate_reorg_layer()

Status arm_compute::graph::backends::detail::validate_reorg_layer ( ReorgLayerNode node)

Validates a Reorg layer node.

Template Parameters
ReorgLayer — Reorg layer type
Parameters
[in] node — Node to validate
Returns
Status

Definition at line 545 of file ValidateHelpers.h.

546 {
547  ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating ReorgLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
548  ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 1);
549  ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);
550 
551  // Extract input and output
553  arm_compute::ITensorInfo *output = get_backing_tensor_info(node.output(0));
554 
555  // Validate function
556  return ReorgLayer::validate(input, output, node.stride());
557 }
arm_compute::ITensorInfo * get_backing_tensor_info(arm_compute::graph::Tensor *tensor)
Returns backing tensor info of a given tensor.
Store the tensor's metadata.
Definition: ITensorInfo.h:40
#define ARM_COMPUTE_RETURN_ERROR_ON(cond)
If the condition is true, an error is returned.
Definition: Error.h:296
#define ARM_COMPUTE_LOG_GRAPH_VERBOSE(x)
Definition: Logger.h:50
Status validate(const ITensorInfo *scores_in, const ITensorInfo *boxes_in, const ITensorInfo *batch_splits_in, const ITensorInfo *scores_out, const ITensorInfo *boxes_out, const ITensorInfo *classes, const ITensorInfo *batch_splits_out, const ITensorInfo *keeps, const ITensorInfo *keeps_size, const BoxNMSLimitInfo info)

References ARM_COMPUTE_LOG_GRAPH_VERBOSE, ARM_COMPUTE_RETURN_ERROR_ON, get_backing_tensor_info(), INode::id(), INode::input(), arm_compute::test::validation::input, INode::name(), INode::num_inputs(), INode::num_outputs(), INode::output(), ReorgLayerNode::stride(), and arm_compute::validate().

◆ validate_reshape_layer()

Status arm_compute::graph::backends::detail::validate_reshape_layer ( ReshapeLayerNode node)

Validates a Reshape layer node.

Template Parameters
ReshapeLayer — Reshape layer type
Parameters
[in] node — Node to validate
Returns
Status

Definition at line 568 of file ValidateHelpers.h.

569 {
570  ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating ReshapeLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
571  ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 1);
572  ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);
573 
574  // Extract input and output
577 
578  // Validate function
579  return ReshapeLayer::validate(input, output);
580 }
arm_compute::ITensorInfo * get_backing_tensor_info(arm_compute::graph::Tensor *tensor)
Returns backing tensor info of a given tensor.
Store the tensor's metadata.
Definition: ITensorInfo.h:40
#define ARM_COMPUTE_RETURN_ERROR_ON(cond)
If the condition is true, an error is returned.
Definition: Error.h:296
#define ARM_COMPUTE_LOG_GRAPH_VERBOSE(x)
Definition: Logger.h:50
Status validate(const ITensorInfo *scores_in, const ITensorInfo *boxes_in, const ITensorInfo *batch_splits_in, const ITensorInfo *scores_out, const ITensorInfo *boxes_out, const ITensorInfo *classes, const ITensorInfo *batch_splits_out, const ITensorInfo *keeps, const ITensorInfo *keeps_size, const BoxNMSLimitInfo info)

References ARM_COMPUTE_LOG_GRAPH_VERBOSE, ARM_COMPUTE_RETURN_ERROR_ON, get_backing_tensor_info(), INode::id(), INode::input(), arm_compute::test::validation::input, INode::name(), INode::num_inputs(), INode::num_outputs(), INode::output(), and arm_compute::validate().

◆ validate_roi_align_layer()

Status arm_compute::graph::backends::detail::validate_roi_align_layer ( ROIAlignLayerNode node)

Validates a ROI Align layer node.

Template Parameters
ROIAlignLayer — ROIAlign layer type
Parameters
[in] node — Node to validate
Returns
Status

Definition at line 591 of file ValidateHelpers.h.

592 {
593  ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating ROIAlignLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
594  ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 2);
595  ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);
596 
597  // Extract input and output
601  const ROIPoolingLayerInfo &pool_info = node.pooling_info();
602 
603  // Validate function
604  return ROIAlignLayer::validate(input, rois, output, pool_info);
605 }
arm_compute::ITensorInfo * get_backing_tensor_info(arm_compute::graph::Tensor *tensor)
Returns backing tensor info of a given tensor.
Store the tensor's metadata.
Definition: ITensorInfo.h:40
#define ARM_COMPUTE_RETURN_ERROR_ON(cond)
If the condition is true, an error is returned.
Definition: Error.h:296
#define ARM_COMPUTE_LOG_GRAPH_VERBOSE(x)
Definition: Logger.h:50
Status validate(const ITensorInfo *scores_in, const ITensorInfo *boxes_in, const ITensorInfo *batch_splits_in, const ITensorInfo *scores_out, const ITensorInfo *boxes_out, const ITensorInfo *classes, const ITensorInfo *batch_splits_out, const ITensorInfo *keeps, const ITensorInfo *keeps_size, const BoxNMSLimitInfo info)

References ARM_COMPUTE_LOG_GRAPH_VERBOSE, ARM_COMPUTE_RETURN_ERROR_ON, get_backing_tensor_info(), INode::id(), INode::input(), arm_compute::test::validation::input, INode::name(), INode::num_inputs(), INode::num_outputs(), INode::output(), ROIAlignLayerNode::pooling_info(), and arm_compute::validate().

◆ validate_slice_layer()

Status arm_compute::graph::backends::detail::validate_slice_layer ( SliceLayerNode node)

Validates a Slice layer node.

Template Parameters
SliceLayer — Slice layer function type
Parameters
[in] node — Node to validate
Returns
Status

Definition at line 616 of file ValidateHelpers.h.

617 {
618  ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating Slice node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
619  ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 1);
620  ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);
621 
622  // Extract IO and info
624  arm_compute::ITensorInfo *output = get_backing_tensor_info(node.output(0));
625  const Coordinates starts = node.starts();
626  const Coordinates ends = node.ends();
627 
628  return SliceLayer::validate(input, output, starts, ends);
629 }
arm_compute::ITensorInfo * get_backing_tensor_info(arm_compute::graph::Tensor *tensor)
Returns backing tensor info of a given tensor.
Store the tensor's metadata.
Definition: ITensorInfo.h:40
#define ARM_COMPUTE_RETURN_ERROR_ON(cond)
If the condition is true, an error is returned.
Definition: Error.h:296
#define ARM_COMPUTE_LOG_GRAPH_VERBOSE(x)
Definition: Logger.h:50
Status validate(const ITensorInfo *scores_in, const ITensorInfo *boxes_in, const ITensorInfo *batch_splits_in, const ITensorInfo *scores_out, const ITensorInfo *boxes_out, const ITensorInfo *classes, const ITensorInfo *batch_splits_out, const ITensorInfo *keeps, const ITensorInfo *keeps_size, const BoxNMSLimitInfo info)

References ARM_COMPUTE_LOG_GRAPH_VERBOSE, ARM_COMPUTE_RETURN_ERROR_ON, SliceLayerNode::ends(), get_backing_tensor_info(), INode::id(), INode::input(), arm_compute::test::validation::input, INode::name(), INode::num_inputs(), INode::num_outputs(), INode::output(), SliceLayerNode::starts(), and arm_compute::validate().

◆ validate_strided_slice_layer()

Status arm_compute::graph::backends::detail::validate_strided_slice_layer ( StridedSliceLayerNode node)

Validates a Strided Slice layer node.

Template Parameters
StridedSliceLayer — Strided Slice layer function type
Parameters
[in] node — Node to validate
Returns
Status

Definition at line 640 of file ValidateHelpers.h.

641 {
642  ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating StridedSlice node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
643  ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 1);
644  ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);
645 
646  // Extract IO and info
648  arm_compute::ITensorInfo *output = get_backing_tensor_info(node.output(0));
649  const Coordinates starts = node.starts();
650  const Coordinates ends = node.ends();
651  const BiStrides strides = node.strides();
652  const StridedSliceLayerInfo info = node.strided_slice_info();
653 
654  return StridedSliceLayer::validate(input, output, starts, ends, strides, info.begin_mask(), info.end_mask(), info.shrink_axis_mask());
655 }
arm_compute::ITensorInfo * get_backing_tensor_info(arm_compute::graph::Tensor *tensor)
Returns backing tensor info of a given tensor.
Coordinates BiStrides
Bidirectional strides.
Definition: Types.h:51
Store the tensor's metadata.
Definition: ITensorInfo.h:40
#define ARM_COMPUTE_RETURN_ERROR_ON(cond)
If the condition is true, an error is returned.
Definition: Error.h:296
ScaleKernelInfo info(interpolation_policy, default_border_mode, PixelValue(), sampling_policy, false)
#define ARM_COMPUTE_LOG_GRAPH_VERBOSE(x)
Definition: Logger.h:50
Status validate(const ITensorInfo *scores_in, const ITensorInfo *boxes_in, const ITensorInfo *batch_splits_in, const ITensorInfo *scores_out, const ITensorInfo *boxes_out, const ITensorInfo *classes, const ITensorInfo *batch_splits_out, const ITensorInfo *keeps, const ITensorInfo *keeps_size, const BoxNMSLimitInfo info)

References ARM_COMPUTE_LOG_GRAPH_VERBOSE, ARM_COMPUTE_RETURN_ERROR_ON, StridedSliceLayerNode::ends(), get_backing_tensor_info(), INode::id(), arm_compute::test::validation::info, INode::input(), arm_compute::test::validation::input, INode::name(), INode::num_inputs(), INode::num_outputs(), INode::output(), StridedSliceLayerNode::starts(), StridedSliceLayerNode::strided_slice_info(), StridedSliceLayerNode::strides(), and arm_compute::validate().

◆ validate_unary_eltwise_layer()

Status arm_compute::graph::backends::detail::validate_unary_eltwise_layer ( UnaryEltwiseLayerNode node)

Validates a unary element-wise layer node.

Parameters
[in] node — Node to validate
Returns
Status

Definition at line 714 of file ValidateHelpers.h.

715 {
716  ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating EltwiseLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
717  ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 1);
718  ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);
719 
720  // Extract input and output
722  arm_compute::ITensorInfo *output = get_backing_tensor_info(node.output(0));
723  const UnaryEltwiseOperation eltwise_op = node.eltwise_descriptor().op;
724 
725  // Validate function
726  if(eltwise_op == UnaryEltwiseOperation::Exp)
727  {
729  }
730  else
731  {
732  ARM_COMPUTE_ERROR("Unsupported unary element-wise operation!");
733  }
734 
735  return Status{};
736 }
arm_compute::ITensorInfo * get_backing_tensor_info(arm_compute::graph::Tensor *tensor)
Returns backing tensor info of a given tensor.
#define ARM_COMPUTE_ERROR(msg)
Print the given message then throw an std::runtime_error.
Definition: Error.h:352
Store the tensor's metadata.
Definition: ITensorInfo.h:40
#define ARM_COMPUTE_RETURN_ERROR_ON(cond)
If the condition is true, an error is returned.
Definition: Error.h:296
#define ARM_COMPUTE_LOG_GRAPH_VERBOSE(x)
Definition: Logger.h:50
Status validate(const ITensorInfo *scores_in, const ITensorInfo *boxes_in, const ITensorInfo *batch_splits_in, const ITensorInfo *scores_out, const ITensorInfo *boxes_out, const ITensorInfo *classes, const ITensorInfo *batch_splits_out, const ITensorInfo *keeps, const ITensorInfo *keeps_size, const BoxNMSLimitInfo info)
UnaryEltwiseOperation
Supported Unary Element-wise operations.
Definition: Types.h:118

References ARM_COMPUTE_ERROR, ARM_COMPUTE_LOG_GRAPH_VERBOSE, ARM_COMPUTE_RETURN_ERROR_ON, UnaryEltwiseLayerNode::eltwise_descriptor(), arm_compute::graph::Exp, get_backing_tensor_info(), INode::id(), INode::input(), arm_compute::test::validation::input, INode::name(), INode::num_inputs(), INode::num_outputs(), UnaryEltwiseLayerDescriptor::op, INode::output(), and arm_compute::validate().