Compute Library
 21.11
arm_compute::graph::backends::detail Namespace Reference

Data Structures

class  BackendRegistrar
 Helper class to statically register a backend. More...
 

Functions

template<typename TargetInfo >
TargetInfo::TensorType * get_backing_tensor (arm_compute::graph::Tensor *tensor)
 Returns backing tensor of a given tensor. More...
 
template<typename TargetInfo >
void validate_node (const INode &node, size_t num_expected_inputs, size_t num_expected_outputs)
 
template<typename ActivationLayerFunction , typename TargetInfo >
std::unique_ptr< IFunction > create_activation_layer (ActivationLayerNode &node)
 Creates a backend activation layer function. More...
 
template<typename ArgMinMaxLayerFunction , typename TargetInfo >
std::unique_ptr< IFunction > create_arg_min_max_layer (ArgMinMaxLayerNode &node)
 Creates a backend argminmax layer function. More...
 
template<typename BatchNormalizationLayerFunction , typename TargetInfo >
std::unique_ptr< IFunction > create_batch_normalization_layer (BatchNormalizationLayerNode &node)
 Create a backend batch normalization layer function. More...
 
template<typename FusedLayerTypes , typename TargetInfo >
std::unique_ptr< IFunction > create_fused_convolution_batch_normalization_layer (FusedConvolutionBatchNormalizationNode &node, GraphContext &ctx)
 Create a backend fused convolution batch normalization layer function. More...
 
template<typename FusedLayerTypes , typename TargetInfo >
std::unique_ptr< IFunction > create_fused_depthwise_convolution_batch_normalization_layer (FusedDepthwiseConvolutionBatchNormalizationNode &node, GraphContext &ctx)
 Create a backend fused depthwise convolution batch normalization layer function. More...
 
template<typename BoundingBoxTransformLayerFunction , typename TargetInfo >
std::unique_ptr< IFunction > create_bounding_box_transform_layer (BoundingBoxTransformLayerNode &node)
 Create a backend bounding box transform layer function. More...
 
template<typename ChannelShuffleLayerFunction , typename TargetInfo >
std::unique_ptr< IFunction > create_channel_shuffle_layer (ChannelShuffleLayerNode &node)
 Create a backend channel shuffle layer function. More...
 
template<typename ConcatenateLayerFunction , typename TargetInfo >
std::unique_ptr< arm_compute::IFunction > create_concatenate_layer (ConcatenateLayerNode &node)
 Create a backend layer concatenate function. More...
 
template<typename ConvolutionLayerFunctions , typename TargetInfo >
std::unique_ptr< IFunction > create_convolution_layer (ConvolutionLayerNode &node, GraphContext &ctx)
 Create a backend convolution layer function. More...
 
template<typename ConvolutionLayerFunctions , typename TargetInfo >
std::unique_ptr< IFunction > create_fused_convolution_with_post_op (FusedConvolutionWithPostOpNode &node, GraphContext &ctx)
 Create a backend convolution layer function with post operator. More...
 
template<typename DeconvolutionLayerFunction , typename TargetInfo >
std::unique_ptr< IFunction > create_deconvolution_layer (DeconvolutionLayerNode &node, GraphContext &ctx)
 Create a backend deconvolution layer function. More...
 
template<typename DepthwiseConvolutionLayer , typename TargetInfo >
std::unique_ptr< IFunction > create_depthwise_convolution_layer (DepthwiseConvolutionLayerNode &node)
 Create a backend layer depth-wise convolution function. More...
 
template<typename DepthToSpaceLayerFunction , typename TargetInfo >
std::unique_ptr< IFunction > create_depth_to_space_layer (DepthToSpaceLayerNode &node)
 Create a backend depth to space layer function. More...
 
template<typename DequantizationLayerFunction , typename TargetInfo >
std::unique_ptr< IFunction > create_dequantization_layer (DequantizationLayerNode &node)
 Create a backend dequantize layer function. More...
 
template<typename DetectionOutputLayerFunction , typename TargetInfo >
std::unique_ptr< IFunction > create_detection_output_layer (DetectionOutputLayerNode &node)
 Create a backend detection output layer function. More...
 
template<typename DetectionPostProcessLayerFunction , typename TargetInfo >
std::unique_ptr< IFunction > create_detection_post_process_layer (DetectionPostProcessLayerNode &node)
 Create a backend detection post process layer function. More...
 
template<typename EltwiseFunctions , typename TargetInfo >
std::unique_ptr< IFunction > create_eltwise_layer (EltwiseLayerNode &node)
 Create a backend element-wise operation layer function. More...
 
template<typename UnaryEltwiseFunctions , typename TargetInfo >
std::unique_ptr< IFunction > create_unary_eltwise_layer (UnaryEltwiseLayerNode &node)
 Create a backend unary element-wise operation layer function. More...
 
template<typename FlattenLayerFunction , typename TargetInfo >
std::unique_ptr< IFunction > create_flatten_layer (FlattenLayerNode &node)
 Create a backend flatten layer function. More...
 
template<typename FullyConnectedLayerFunction , typename TargetInfo >
std::unique_ptr< IFunction > create_fully_connected_layer (FullyConnectedLayerNode &node, GraphContext &ctx)
 Create a backend fully connected layer function. More...
 
template<typename GenerateProposalsLayerFunction , typename TargetInfo >
std::unique_ptr< IFunction > create_generate_proposals_layer (GenerateProposalsLayerNode &node, GraphContext &ctx)
 Create a backend generate proposals layer function. More...
 
template<typename L2NormalizeLayerFunction , typename TargetInfo >
std::unique_ptr< IFunction > create_l2_normalize_layer (L2NormalizeLayerNode &node, GraphContext &ctx)
 Create a backend l2 normalization layer function. More...
 
template<typename NormalizationLayerFunction , typename TargetInfo >
std::unique_ptr< IFunction > create_normalization_layer (NormalizationLayerNode &node, GraphContext &ctx)
 Create a backend normalization layer function. More...
 
template<typename NormalizePlanarYUVLayerFunction , typename TargetInfo >
std::unique_ptr< IFunction > create_normalize_planar_yuv_layer (NormalizePlanarYUVLayerNode &node)
 Create a backend normalize planar YUV layer function. More...
 
template<typename PadLayerFunction , typename TargetInfo >
std::unique_ptr< IFunction > create_pad_layer (PadLayerNode &node)
 Create a backend pad layer function. More...
 
template<typename PermuteLayerFunction , typename TargetInfo >
std::unique_ptr< IFunction > create_permute_layer (PermuteLayerNode &node)
 Create a backend permute layer function. More...
 
template<typename PoolingLayerFunction , typename TargetInfo >
std::unique_ptr< IFunction > create_pooling_layer (PoolingLayerNode &node)
 Create a backend pooling layer function. More...
 
template<typename PReluFunction , typename TargetInfo >
std::unique_ptr< IFunction > create_prelu_layer (PReluLayerNode &node)
 Create a backend PRelu layer function. More...
 
template<typename TargetInfo >
std::unique_ptr< IFunction > create_print_layer (PrintLayerNode &node)
 Create a backend print layer function. More...
 
template<typename PriorBoxLayerFunction , typename TargetInfo >
std::unique_ptr< IFunction > create_priorbox_layer (PriorBoxLayerNode &node)
 Create a backend priorbox layer function. More...
 
template<typename QuantizationLayerFunction , typename TargetInfo >
std::unique_ptr< IFunction > create_quantization_layer (QuantizationLayerNode &node)
 Create a backend quantization layer function. More...
 
template<typename ReductionOperationFunction , typename TargetInfo >
std::unique_ptr< IFunction > create_reduction_operation_layer (ReductionLayerNode &node, GraphContext &ctx)
 Create a backend reduction operation layer function. More...
 
template<typename ReorgLayerFunction , typename TargetInfo >
std::unique_ptr< IFunction > create_reorg_layer (ReorgLayerNode &node)
 Create a backend reorg layer function. More...
 
template<typename ReshapeLayerFunction , typename TargetInfo >
std::unique_ptr< IFunction > create_reshape_layer (ReshapeLayerNode &node)
 Create a backend reshape layer function. More...
 
template<typename ResizeLayerFunction , typename TargetInfo >
std::unique_ptr< IFunction > create_resize_layer (ResizeLayerNode &node)
 Create a backend resize layer function. More...
 
template<typename ROIAlignLayerFunction , typename TargetInfo >
std::unique_ptr< IFunction > create_roi_align_layer (ROIAlignLayerNode &node)
 Create a backend ROI align layer function. More...
 
template<typename SliceLayerFunction , typename TargetInfo >
std::unique_ptr< IFunction > create_slice_layer (SliceLayerNode &node)
 Create a backend slice layer function. More...
 
template<typename SoftmaxLayerFunction , typename TargetInfo >
std::unique_ptr< IFunction > create_softmax_layer (SoftmaxLayerNode &node, GraphContext &ctx)
 Create a backend softmax layer function. More...
 
template<typename StackLayerFunction , typename TargetInfo >
std::unique_ptr< arm_compute::IFunction > create_stack_layer (StackLayerNode &node)
 Create a backend layer stack function. More...
 
template<typename StridedSliceLayerFunction , typename TargetInfo >
std::unique_ptr< IFunction > create_strided_slice_layer (StridedSliceLayerNode &node)
 Create a backend strided slice layer function. More...
 
arm_compute::ITensorInfo * get_backing_tensor_info (arm_compute::graph::Tensor *tensor)
 Returns backing tensor info of a given tensor. More...
 
template<typename ArgMinMaxLayer >
Status validate_arg_min_max_layer (ArgMinMaxLayerNode &node)
 Validates a ArgMinMax layer node. More...
 
template<typename BoundingBoxTransformLayer >
Status validate_bounding_box_transform_layer (BoundingBoxTransformLayerNode &node)
 Validates a Bounding Box Transform layer node. More...
 
template<typename ChannelShuffleLayer >
Status validate_channel_shuffle_layer (ChannelShuffleLayerNode &node)
 Validates a Channel Shuffle layer node. More...
 
template<typename ConvolutionLayer , typename DirectConvolutionLayer , typename GEMMConvolutionLayer , typename WinogradConvolutionLayer >
Status validate_convolution_layer (ConvolutionLayerNode &node)
 Validates a Convolution layer node. More...
 
template<typename GEMMConvolutionLayer >
Status validate_fused_convolution_with_post_op (FusedConvolutionWithPostOpNode &node)
 Validates a fused Convolution with post operator layer node. More...
 
template<typename DepthwiseConvolutionLayer >
Status validate_depthwise_convolution_layer (DepthwiseConvolutionLayerNode &node)
 Validates a Depthwise Convolution layer node. More...
 
template<typename DepthToSpaceLayer >
Status validate_depth_to_space_layer (DepthToSpaceLayerNode &node)
 Validates a depth to space layer node. More...
 
template<typename DequantizationLayer >
Status validate_dequantization_layer (DequantizationLayerNode &node)
 Validates a dequantize layer node. More...
 
template<typename DetectionOutputLayer >
Status validate_detection_output_layer (DetectionOutputLayerNode &node)
 Validates a detection output layer node. More...
 
template<typename DetectionPostProcessLayer >
Status validate_detection_post_process_layer (DetectionPostProcessLayerNode &node)
 Validates a detection post process layer node. More...
 
template<typename GenerateProposalsLayer >
Status validate_generate_proposals_layer (GenerateProposalsLayerNode &node)
 Validates a Generate Proposals layer node. More...
 
template<typename L2NormalizeLayer >
Status validate_l2_normalize_layer (L2NormalizeLayerNode &node)
 Validates a L2Normalization layer node. More...
 
template<typename NormalizePlanarYUVLayer >
Status validate_normalize_planar_yuv_layer (NormalizePlanarYUVLayerNode &node)
 Validates a NormalizePlanarYUV layer node. More...
 
template<typename PadLayer >
Status validate_pad_layer (PadLayerNode &node)
 Validates a pad layer node. More...
 
template<typename PermuteLayer >
Status validate_permute_layer (PermuteLayerNode &node)
 Validates a permute layer node. More...
 
template<typename PReluLayer >
Status validate_prelu_layer (PReluLayerNode &node)
 Validates a PRelu layer node. More...
 
template<typename PriorBoxLayer >
Status validate_priorbox_layer (PriorBoxLayerNode &node)
 Validates a priorbox layer node. More...
 
template<typename QuantizationLayer >
Status validate_quantization_layer (QuantizationLayerNode &node)
 Validates a Quantization layer node. More...
 
template<typename ReductionLayer >
Status validate_reduction_operation_layer (ReductionLayerNode &node)
 Validates a Reduction operation layer node. More...
 
template<typename ReorgLayer >
Status validate_reorg_layer (ReorgLayerNode &node)
 Validates a Reorg layer node. More...
 
template<typename ReshapeLayer >
Status validate_reshape_layer (ReshapeLayerNode &node)
 Validates a Reshape layer node. More...
 
template<typename ROIAlignLayer >
Status validate_roi_align_layer (ROIAlignLayerNode &node)
 Validates a ROI Align layer node. More...
 
template<typename SliceLayer >
Status validate_slice_layer (SliceLayerNode &node)
 Validates a Slice layer node. More...
 
template<typename StridedSliceLayer >
Status validate_strided_slice_layer (StridedSliceLayerNode &node)
 Validates a Strided Slice layer node. More...
 
template<typename EltwiseLayerFunctions >
Status validate_eltwise_Layer (EltwiseLayerNode &node)
 Validates a element-wise layer node. More...
 
template<typename UnaryEltwiseLayerFunctions >
Status validate_unary_eltwise_layer (UnaryEltwiseLayerNode &node)
 Validates a unary element-wise layer node. More...
 
template<>
std::unique_ptr< IFunction > create_detection_output_layer< CPPDetectionOutputLayer, CLTargetInfo > (DetectionOutputLayerNode &node)
 
template<>
std::unique_ptr< IFunction > create_detection_post_process_layer< CPPDetectionPostProcessLayer, CLTargetInfo > (DetectionPostProcessLayerNode &node)
 
template<>
std::unique_ptr< IFunction > create_normalization_layer< NENormalizationLayer, NETargetInfo > (NormalizationLayerNode &node, GraphContext &ctx)
 

Function Documentation

◆ create_activation_layer()

std::unique_ptr<IFunction> arm_compute::graph::backends::detail::create_activation_layer ( ActivationLayerNode node)

Creates a backend activation layer function.

Template Parameters
ActivationLayerFunctionBackend activation function
TargetInfoTarget-specific information
Parameters
[in]nodeNode to create the backend function for
Returns
Backend activation layer function

Definition at line 101 of file FunctionHelpers.h.

References ARM_COMPUTE_LOG_GRAPH_INFO, arm_compute::test::validation::input, and arm_compute::graph::backends::is_in_place_operation().

102 {
103  validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
104 
105  // Extract IO and info
106  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
107  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
108  const ActivationLayerInfo act_info = node.activation_info();
109 
110  // Create function
111  auto func = std::make_unique<ActivationLayerFunction>();
112  func->configure(input, output, act_info);
113 
114  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
115  << node.name()
116  << " Type: " << node.type()
117  << " Target: " << TargetInfo::TargetType
118  << " Data Type: " << input->info()->data_type()
119  << " Shape: " << input->info()->tensor_shape()
120  << " Activation function: " << act_info.activation()
121  << " a: " << act_info.a()
122  << " b: " << act_info.b()
123  << " InPlace : " << is_in_place_operation(input, output)
124  << std::endl);
125 
126  return std::move(func);
127 }
TensorType
Memory type.
Definition: Types.h:38
#define ARM_COMPUTE_LOG_GRAPH_INFO(x)
Definition: Logger.h:54
bool is_in_place_operation(void *input, void *output)
Checks if an operation is in place.
Definition: Utils.h:77

◆ create_arg_min_max_layer()

std::unique_ptr<IFunction> arm_compute::graph::backends::detail::create_arg_min_max_layer ( ArgMinMaxLayerNode node)

Creates a backend argminmax layer function.

Template Parameters
ArgMinMaxLayerFunctionBackend activation function
TargetInfoTarget-specific information
Parameters
[in]nodeNode to create the backend function for
Returns
Backend argminmax layer function

Definition at line 139 of file FunctionHelpers.h.

References ARM_COMPUTE_LOG_GRAPH_INFO, and arm_compute::test::validation::input.

140 {
141  validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
142 
143  // Extract IO and info
144  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
145  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
146  const ReductionOperation op = node.reduction_operation();
147  unsigned int axis = node.axis();
148 
149  // Create function
150  auto func = std::make_unique<ArgMinMaxLayerFunction>();
151  func->configure(input, axis, output, op);
152 
153  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
154  << node.name()
155  << " Type: " << node.type()
156  << " Target: " << TargetInfo::TargetType
157  << " Data Type: " << input->info()->data_type()
158  << " Shape: " << input->info()->tensor_shape()
159  << " Reduction Operation: " << op
160  << " axis: " << axis
161  << std::endl);
162 
163  return std::move(func);
164 }
ReductionOperation
Available reduction operations.
Definition: Types.h:463
TensorType
Memory type.
Definition: Types.h:38
#define ARM_COMPUTE_LOG_GRAPH_INFO(x)
Definition: Logger.h:54

◆ create_batch_normalization_layer()

std::unique_ptr<IFunction> arm_compute::graph::backends::detail::create_batch_normalization_layer ( BatchNormalizationLayerNode node)

Create a backend batch normalization layer function.

Template Parameters
BatchNormalizationLayerFunctionBackend batch normalization function
TargetInfoTarget-specific information
Parameters
[in]nodeNode to create the backend function for
Returns
Backend batch normalization layer function

Definition at line 176 of file FunctionHelpers.h.

References ActivationLayerInfo::activation(), ARM_COMPUTE_LOG_GRAPH_INFO, ActivationLayerInfo::enabled(), arm_compute::quantization::epsilon, arm_compute::test::validation::input, arm_compute::graph::backends::is_in_place_operation(), and arm_compute::to_string().

177 {
178  validate_node<TargetInfo>(node, 5 /* expected inputs */, 1 /* expected outputs */);
179 
180  // Extract IO and info
181  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
182  typename TargetInfo::TensorType *mean = get_backing_tensor<TargetInfo>(node.input(1));
183  typename TargetInfo::TensorType *var = get_backing_tensor<TargetInfo>(node.input(2));
184  typename TargetInfo::TensorType *beta = get_backing_tensor<TargetInfo>(node.input(3));
185  typename TargetInfo::TensorType *gamma = get_backing_tensor<TargetInfo>(node.input(4));
186 
187  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
188  const float epsilon = node.epsilon();
189  const ActivationLayerInfo fused_act = node.fused_activation();
190 
191  // Create and configure function
192  auto func = std::make_unique<BatchNormalizationLayerFunction>();
193  func->configure(input, output, mean, var, beta, gamma, epsilon, fused_act);
194 
195  // Log info
196  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
197  << node.name()
198  << " Type: " << node.type()
199  << " Target: " << TargetInfo::TargetType
200  << " Data Type: " << input->info()->data_type()
201  << " Shape: " << input->info()->tensor_shape()
202  << " Epsilon: " << epsilon << " "
203  << (fused_act.enabled() ? to_string(fused_act.activation()) : "")
204  << " InPlace: " << is_in_place_operation(input, output)
205  << std::endl);
206 
207  return std::move(func);
208 }
TensorType
Memory type.
Definition: Types.h:38
std::string to_string(const GEMMConfigNative &config)
Definition: Utils.cpp:156
#define ARM_COMPUTE_LOG_GRAPH_INFO(x)
Definition: Logger.h:54
bool is_in_place_operation(void *input, void *output)
Checks if an operation is in place.
Definition: Utils.h:77

◆ create_bounding_box_transform_layer()

std::unique_ptr<IFunction> arm_compute::graph::backends::detail::create_bounding_box_transform_layer ( BoundingBoxTransformLayerNode node)

Create a backend bounding box transform layer function.

Template Parameters
BoundingBoxTransformLayerFunctionBackend bounding box transform function
TargetInfoTarget-specific information
Parameters
[in]nodeNode to create the backend function for
Returns
Backend bounding box transform layer function

Definition at line 333 of file FunctionHelpers.h.

References ARM_COMPUTE_LOG_GRAPH_INFO, and arm_compute::test::validation::input.

334 {
335  validate_node<TargetInfo>(node, 2 /* expected inputs */, 1 /* expected outputs */);
336 
337  // Extract IO and info
338  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
339  typename TargetInfo::TensorType *deltas = get_backing_tensor<TargetInfo>(node.input(1));
340  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
341  const BoundingBoxTransformInfo bbox_info = node.info();
342 
343  // Create and configure function
344  auto func = std::make_unique<BoundingBoxTransformLayerFunction>();
345  func->configure(input, output, deltas, bbox_info);
346 
347  // Log info
348  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
349  << node.name()
350  << " Type: " << node.type()
351  << " Target: " << TargetInfo::TargetType
352  << " Data Type: " << input->info()->data_type()
353  << " Shape: " << input->info()->tensor_shape()
354  << " BoundingBox Info img W: " << bbox_info.img_width() << " "
355  << " BoundingBox Info img H: " << bbox_info.img_height() << " "
356  << std::endl);
357 
358  return std::move(func);
359 }
TensorType
Memory type.
Definition: Types.h:38
#define ARM_COMPUTE_LOG_GRAPH_INFO(x)
Definition: Logger.h:54

◆ create_channel_shuffle_layer()

std::unique_ptr<IFunction> arm_compute::graph::backends::detail::create_channel_shuffle_layer ( ChannelShuffleLayerNode node)

Create a backend channel shuffle layer function.

Template Parameters
ChannelShuffleLayerFunctionBackend channel shuffle function
TargetInfoTarget-specific information
Parameters
[in]nodeNode to create the backend function for
Returns
Backend channel shuffle layer function

Definition at line 371 of file FunctionHelpers.h.

References ARM_COMPUTE_LOG_GRAPH_INFO, arm_compute::test::validation::input, and arm_compute::test::validation::num_groups.

372 {
373  validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
374 
375  // Extract IO and info
376  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
377  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
378  const unsigned int num_groups = node.num_groups();
379 
380  // Create function
381  auto func = std::make_unique<ChannelShuffleLayerFunction>();
382  func->configure(input, output, num_groups);
383 
384  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
385  << node.name()
386  << " Type: " << node.type()
387  << " Target: " << TargetInfo::TargetType
388  << " Data Type: " << input->info()->data_type()
389  << " Shape: " << input->info()->tensor_shape()
390  << " Num groups: " << num_groups
391  << std::endl);
392 
393  return std::move(func);
394 }
TensorType
Memory type.
Definition: Types.h:38
#define ARM_COMPUTE_LOG_GRAPH_INFO(x)
Definition: Logger.h:54
const unsigned int num_groups
Definition: Im2Col.cpp:153

◆ create_concatenate_layer()

std::unique_ptr<arm_compute::IFunction> arm_compute::graph::backends::detail::create_concatenate_layer ( ConcatenateLayerNode node)

Create a backend layer concatenate function.

Template Parameters
ConcatenateLayerFunctionBackend concatenate function
TargetInfoTarget-specific information
Parameters
[in]nodeNode to create the backend function for
Returns
Backend concatenate layer function

Definition at line 406 of file FunctionHelpers.h.

References ARM_COMPUTE_ERROR_ON, ARM_COMPUTE_LOG_GRAPH_INFO, ARM_COMPUTE_LOG_GRAPH_VERBOSE, ConcatenateLayerNode::concatenation_axis(), arm_compute::test::validation::data_layout, Tensor::desc(), arm_compute::graph::get_dimension_idx(), INode::id(), INode::input(), arm_compute::is_data_type_quantized_asymmetric(), ConcatenateLayerNode::is_enabled(), TensorDescriptor::layout, INode::name(), INode::num_inputs(), INode::num_outputs(), INode::output(), ConcatenateLayerNode::type(), and arm_compute::UNKNOWN.

407 {
408  ARM_COMPUTE_LOG_GRAPH_VERBOSE("Creating Concatenate node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
409  ARM_COMPUTE_ERROR_ON(node.num_outputs() != 1);
410 
411  // Return nullptr if depth concatenate is switched off
412  if(!node.is_enabled())
413  {
414  return nullptr;
415  }
416 
417  // Extract IO and info
418  std::vector<typename TargetInfo::SrcTensorType *> inputs;
419  for(unsigned int i = 0; i < node.num_inputs(); ++i)
420  {
421  inputs.push_back(get_backing_tensor<TargetInfo>(node.input(i)));
422  }
423  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
424  const DataLayout data_layout = node.output(0) != nullptr ? node.output(0)->desc().layout : DataLayout::UNKNOWN;
425  const size_t concat_axis = get_dimension_idx(data_layout, node.concatenation_axis());
426 
427  // Create and configure function
428  auto func = std::make_unique<ConcatenateLayerFunction>();
429  func->configure(inputs, output, concat_axis);
430 
431  // Log info
432  const bool is_quantized = is_data_type_quantized_asymmetric(output->info()->data_type());
433  std::ostringstream qss;
434  if(is_quantized)
435  {
436  qss << " Output QuantInfo: " << output->info()->quantization_info();
437  }
438  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
439  << node.name()
440  << " Type: " << node.type()
441  << " Target: " << TargetInfo::TargetType
442  << " Data Type: " << output->info()->data_type()
443  << " Shape: " << output->info()->tensor_shape()
444  << " Num Inputs: " << inputs.size()
445  << " Axis: " << concat_axis
446  << qss.str()
447  << std::endl);
448 
449  return std::move(func);
450 }
Unknown CL kernel type.
Definition: CLTypes.h:81
TensorType
Memory type.
Definition: Types.h:38
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
Definition: Error.h:466
#define ARM_COMPUTE_LOG_GRAPH_INFO(x)
Definition: Logger.h:54
bool is_data_type_quantized_asymmetric(DataType dt)
Check if a given data type is of asymmetric quantized type.
Definition: Utils.h:1003
#define ARM_COMPUTE_LOG_GRAPH_VERBOSE(x)
Definition: Logger.h:50
DataLayout
[DataLayout enum definition]
Definition: Types.h:113
size_t get_dimension_idx(DataLayout data_layout, const DataLayoutDimension data_layout_dimension)
Get index of a tensor&#39;s given dimension depending on its layout.
Definition: Utils.cpp:148

◆ create_convolution_layer()

std::unique_ptr<IFunction> arm_compute::graph::backends::detail::create_convolution_layer ( ConvolutionLayerNode node,
GraphContext ctx 
)

Create a backend convolution layer function.

Template Parameters
ConvolutionLayerFunctionsBackend convolution functions
TargetInfoTarget-specific information
Parameters
[in]nodeNode to create the backend function for
[in]ctxGraph context
Returns
Backend convolution layer function

Definition at line 463 of file FunctionHelpers.h.

References ActivationLayerInfo::activation(), ARM_COMPUTE_ERROR_ON_MSG, ARM_COMPUTE_LOG_GRAPH_INFO, arm_compute::test::validation::conv_info, arm_compute::graph::Direct, arm_compute::graph::Enabled, ActivationLayerInfo::enabled(), arm_compute::graph::GEMM, arm_compute::graph::backends::get_memory_manager(), arm_compute::test::validation::input, arm_compute::is_data_type_quantized_asymmetric(), arm_compute::test::validation::num_groups, arm_compute::S32, arm_compute::to_string(), arm_compute::utils::cast::U, and arm_compute::graph::Winograd.

464 {
465  validate_node<TargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);
466 
467  // Extract IO and info
468  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
469  typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
470  typename TargetInfo::TensorType *biases = get_backing_tensor<TargetInfo>(node.input(2));
471  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
472 
473  const bool is_quantized = is_data_type_quantized_asymmetric(input->info()->data_type());
474 
475  if(is_quantized)
476  {
477  biases->info()->set_data_type(DataType::S32);
478  }
479 
480  const PadStrideInfo conv_info = node.convolution_info();
481  const unsigned int num_groups = node.num_groups();
482  const ConvolutionMethod conv_algorithm = node.convolution_method();
483  const bool fast_math = node.fast_math_hint() == FastMathHint::Enabled;
484  const ActivationLayerInfo fused_act = node.fused_activation();
485 
486  // Create and configure function (we assume that functions have been validated before creation)
487  std::shared_ptr<IMemoryManager> mm = get_memory_manager(ctx, TargetInfo::TargetType);
488  std::unique_ptr<IFunction> func;
489  std::string func_name;
490 
491  if(conv_algorithm == ConvolutionMethod::Winograd)
492  {
493  ARM_COMPUTE_ERROR_ON_MSG(num_groups != 1, "WinogradConvolutionLayer does not support grouping!");
494  std::tie(func, func_name) = create_named_memory_managed_function<typename ConvolutionLayerFunctions::WinogradConvolutionLayer>(
495  std::string("WinogradConvolutionLayer"), mm,
496  input, weights, biases, output, conv_info, fused_act, fast_math);
497  }
498  else if(conv_algorithm == ConvolutionMethod::Direct)
499  {
500  ARM_COMPUTE_ERROR_ON_MSG(num_groups != 1, "DirectConvolutionLayer does not support grouping!");
501  std::tie(func, func_name) = create_named_function<typename ConvolutionLayerFunctions::DirectConvolutionLayer>(
502  std::string("DirectConvolutionLayer"),
503  input, weights, biases, output, conv_info, fused_act);
504  }
505  else if(conv_algorithm == ConvolutionMethod::GEMM)
506  {
507  std::tie(func, func_name) = create_named_memory_managed_function<typename ConvolutionLayerFunctions::GEMMConvolutionLayer>(
508  std::string("GEMMConvolutionLayer"), mm,
509  input, weights, biases, output, conv_info,
510  WeightsInfo(), Size2D(1U, 1U), fused_act, num_groups);
511  }
512  else
513  {
514  std::tie(func, func_name) = create_named_memory_managed_function<typename ConvolutionLayerFunctions::GenericConvolutionLayer>(
515  std::string("GenericConvolutionLayer"), mm,
516  input, weights, biases, output, conv_info,
517  WeightsInfo(), Size2D(1U, 1U), fused_act, fast_math, num_groups);
518  }
519 
520  // Log info
521  std::ostringstream qss;
522  if(is_quantized)
523  {
524  qss << " Input QuantInfo: " << input->info()->quantization_info()
525  << " Weights QuantInfo: " << weights->info()->quantization_info()
526  << " Output QuantInfo: " << output->info()->quantization_info();
527  }
528  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
529  << node.name()
530  << " Type: " << func_name
531  << " Target: " << TargetInfo::TargetType
532  << " Data Type: " << input->info()->data_type()
533  << " Groups: " << num_groups
534  << " Input shape: " << input->info()->tensor_shape()
535  << " Weights shape: " << weights->info()->tensor_shape()
536  << " Output shape: " << output->info()->tensor_shape()
537  << qss.str()
538  << (fused_act.enabled() ? " " + to_string(fused_act.activation()) : "")
539  << std::endl);
540  return std::move(func);
541 }
TensorType
Memory type.
Definition: Types.h:38
std::string to_string(const GEMMConfigNative &config)
Definition: Utils.cpp:156
ConvolutionMethod
Available ConvolutionMethod.
Definition: Types.h:134
#define ARM_COMPUTE_LOG_GRAPH_INFO(x)
Definition: Logger.h:54
#define ARM_COMPUTE_ERROR_ON_MSG(cond, msg)
Definition: Error.h:456
const unsigned int num_groups
Definition: Im2Col.cpp:153
bool is_data_type_quantized_asymmetric(DataType dt)
Check if a given data type is of asymmetric quantized type.
Definition: Utils.h:1003
GEMM CL kernel type.
Definition: CLTypes.h:85
std::shared_ptr< IMemoryManager > get_memory_manager(GraphContext &ctx, Target target)
Returns the memory manager for a given target.
Definition: Utils.h:89

◆ create_deconvolution_layer()

std::unique_ptr<IFunction> arm_compute::graph::backends::detail::create_deconvolution_layer ( DeconvolutionLayerNode node,
GraphContext ctx 
)

Create a backend deconvolution layer function.

Template Parameters
DeconvolutionLayerFunction — Backend deconvolution function
TargetInfo — Target-specific information
Parameters
[in] node — Node to create the backend function for
[in] ctx — Graph context
Returns
Backend deconvolution layer function

Definition at line 647 of file FunctionHelpers.h.

References ARM_COMPUTE_LOG_GRAPH_INFO, arm_compute::graph::backends::get_memory_manager(), and arm_compute::test::validation::input.

648 {
649  validate_node<TargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);
650 
651  // Extract IO and info
652  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
653  typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
654  typename TargetInfo::TensorType *biases = get_backing_tensor<TargetInfo>(node.input(2));
655  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
656 
657  const PadStrideInfo deconv_info = node.deconvolution_info();
658 
659  // Create and configure function (we assume that functions have been validated before creation)
660  std::shared_ptr<IMemoryManager> mm = get_memory_manager(ctx, TargetInfo::TargetType);
661  std::unique_ptr<IFunction> func;
662 
663  std::tie(func, std::ignore) = create_named_memory_managed_function<DeconvolutionLayerFunction>(
664  std::string(), mm,
665  input, weights, biases, output, deconv_info);
666 
667  // Log info
668  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
669  << node.name()
670  << " Type: " << node.type()
671  << " Target: " << TargetInfo::TargetType
672  << " Data Type: " << input->info()->data_type()
673  << " Input shape: " << input->info()->tensor_shape()
674  << " Weights shape: " << weights->info()->tensor_shape()
675  << " Output shape: " << output->info()->tensor_shape()
676  << std::endl);
677  return func;
678 }
TensorType
Memory type.
Definition: Types.h:38
#define ARM_COMPUTE_LOG_GRAPH_INFO(x)
Definition: Logger.h:54
std::shared_ptr< IMemoryManager > get_memory_manager(GraphContext &ctx, Target target)
Returns the memory manager for a given target.
Definition: Utils.h:89

◆ create_depth_to_space_layer()

std::unique_ptr<IFunction> arm_compute::graph::backends::detail::create_depth_to_space_layer ( DepthToSpaceLayerNode node)

Create a backend depth to space layer function.

Template Parameters
DepthToSpaceLayerNodeFunction — Backend depth to space function
TargetInfo — Target-specific information
Parameters
[in] node — Node to create the backend function for
Returns
Backend depth to space layer function

Definition at line 752 of file FunctionHelpers.h.

References ARM_COMPUTE_ERROR_ON, ARM_COMPUTE_LOG_GRAPH_INFO, and arm_compute::test::validation::input.

753 {
754  validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
755 
756  // Extract IO and info
757  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
758  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
759 
760  ARM_COMPUTE_ERROR_ON(input == nullptr);
761  ARM_COMPUTE_ERROR_ON(output == nullptr);
762 
763  // Create and configure function
764  auto func = std::make_unique<DepthToSpaceLayerFunction>();
765  func->configure(input, output, node.block_shape());
766 
767  // Log info
768  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
769  << node.name()
770  << " Type: " << node.type()
771  << " Target: " << TargetInfo::TargetType
772  << " Data Type: " << input->info()->data_type()
773  << " Input shape: " << input->info()->tensor_shape()
774  << " Block Size: " << node.block_shape()
775  << " Output shape: " << output->info()->tensor_shape()
776  << std::endl);
777 
778  return std::move(func);
779 }
TensorType
Memory type.
Definition: Types.h:38
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
Definition: Error.h:466
#define ARM_COMPUTE_LOG_GRAPH_INFO(x)
Definition: Logger.h:54

◆ create_depthwise_convolution_layer()

std::unique_ptr<IFunction> arm_compute::graph::backends::detail::create_depthwise_convolution_layer ( DepthwiseConvolutionLayerNode node)

Create a backend layer depth-wise convolution function.

Template Parameters
DepthwiseConvolutionLayerFunctions — Backend depthwise convolution function
TargetInfo — Target-specific information
Parameters
[in] node — Node to create the backend function for
Returns
Backend depth-wise convolution layer function

Definition at line 690 of file FunctionHelpers.h.

References ActivationLayerInfo::activation(), ARM_COMPUTE_LOG_GRAPH_INFO, arm_compute::test::validation::conv_info, ActivationLayerInfo::enabled(), arm_compute::test::validation::input, arm_compute::is_data_type_quantized_asymmetric(), arm_compute::S32, and arm_compute::to_string().

691 {
692  validate_node<TargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);
693 
694  // Extract IO and info
695  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
696  typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
697  typename TargetInfo::TensorType *biases = get_backing_tensor<TargetInfo>(node.input(2));
698  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
699 
700  const bool is_quantized = is_data_type_quantized_asymmetric(input->info()->data_type());
701 
702  if(is_quantized)
703  {
704  biases->info()->set_data_type(DataType::S32);
705  }
706 
707  const PadStrideInfo conv_info = node.convolution_info();
708  const unsigned int depth_multiplier = node.depth_multiplier();
709  const ActivationLayerInfo fused_act = node.fused_activation();
710 
711  // Create and configure function (we assume that functions have been validated before creation)
712  std::unique_ptr<IFunction> func;
713  std::string func_name;
714 
715  std::tie(func, func_name) = create_named_function<DepthwiseConvolutionLayer>(
716  std::string("DepthwiseConvolutionLayer"),
717  input, weights, biases, output, conv_info, depth_multiplier, fused_act);
718 
719  // Log info
720  std::ostringstream qss;
721  if(is_quantized)
722  {
723  qss << " Input QuantInfo: " << input->info()->quantization_info()
724  << " Weights QuantInfo: " << weights->info()->quantization_info()
725  << " Output QuantInfo: " << output->info()->quantization_info();
726  }
727  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
728  << node.name()
729  << " Type: " << func_name
730  << " Target: " << TargetInfo::TargetType
731  << " Data Type: " << input->info()->data_type()
732  << " Input shape: " << input->info()->tensor_shape()
733  << " Weights shape: " << weights->info()->tensor_shape()
734  << " Output shape: " << output->info()->tensor_shape()
735  << " Depth multiplier: " << depth_multiplier
736  << qss.str()
737  << (fused_act.enabled() ? " " + to_string(fused_act.activation()) : "")
738  << std::endl);
739  return std::move(func);
740 }
TensorType
Memory type.
Definition: Types.h:38
std::string to_string(const GEMMConfigNative &config)
Definition: Utils.cpp:156
#define ARM_COMPUTE_LOG_GRAPH_INFO(x)
Definition: Logger.h:54
bool is_data_type_quantized_asymmetric(DataType dt)
Check if a given data type is of asymmetric quantized type.
Definition: Utils.h:1003

◆ create_dequantization_layer()

std::unique_ptr<IFunction> arm_compute::graph::backends::detail::create_dequantization_layer ( DequantizationLayerNode node)

Create a backend dequantize layer function.

Template Parameters
DequantizationLayerFunction — Backend dequantize function
TargetInfo — Target-specific information
Parameters
[in] node — Node to create the backend function for
Returns
Backend dequantize layer function

Definition at line 791 of file FunctionHelpers.h.

References ARM_COMPUTE_ERROR_ON, ARM_COMPUTE_LOG_GRAPH_INFO, and arm_compute::test::validation::input.

792 {
793  validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
794 
795  // Extract IO and info
796  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
797  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
798 
799  ARM_COMPUTE_ERROR_ON(input == nullptr);
800  ARM_COMPUTE_ERROR_ON(output == nullptr);
801 
802  // Create and configure function
803  auto func = std::make_unique<DequantizationLayerFunction>();
804  func->configure(input, output);
805 
806  // Log info
807  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
808  << node.name()
809  << " Type: " << node.type()
810  << " Target: " << TargetInfo::TargetType
811  << " Data Type: " << input->info()->data_type()
812  << " Input shape: " << input->info()->tensor_shape()
813  << " Input quantization info: " << output->info()->quantization_info()
814  << " Output shape: " << output->info()->tensor_shape()
815  << std::endl);
816 
817  return std::move(func);
818 }
TensorType
Memory type.
Definition: Types.h:38
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
Definition: Error.h:466
#define ARM_COMPUTE_LOG_GRAPH_INFO(x)
Definition: Logger.h:54

◆ create_detection_output_layer()

std::unique_ptr<IFunction> arm_compute::graph::backends::detail::create_detection_output_layer ( DetectionOutputLayerNode node)

Create a backend detection output layer function.

Template Parameters
DetectionOutputLayerFunction — Backend detection output function
TargetInfo — Target-specific information
Parameters
[in] node — Node to create the backend function for
Returns
Backend detection output layer function

Definition at line 829 of file FunctionHelpers.h.

References ARM_COMPUTE_ERROR_ON, and ARM_COMPUTE_LOG_GRAPH_INFO.

830 {
831  validate_node<TargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);
832 
833  // Extract IO and info
834  typename TargetInfo::TensorType *input0 = get_backing_tensor<TargetInfo>(node.input(0));
835  typename TargetInfo::TensorType *input1 = get_backing_tensor<TargetInfo>(node.input(1));
836  typename TargetInfo::TensorType *input2 = get_backing_tensor<TargetInfo>(node.input(2));
837  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
838  const DetectionOutputLayerInfo detect_info = node.detection_output_info();
839 
840  ARM_COMPUTE_ERROR_ON(input0 == nullptr);
841  ARM_COMPUTE_ERROR_ON(input1 == nullptr);
842  ARM_COMPUTE_ERROR_ON(input2 == nullptr);
843  ARM_COMPUTE_ERROR_ON(output == nullptr);
844 
845  // Create and configure function
846  auto func = std::make_unique<DetectionOutputLayerFunction>();
847  func->configure(input0, input1, input2, output, detect_info);
848 
849  // Log info
850  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
851  << node.name()
852  << " Type: " << node.type()
853  << " Target: " << TargetInfo::TargetType
854  << " Data Type: " << input0->info()->data_type()
855  << " Input0 shape: " << input0->info()->tensor_shape()
856  << " Input1 shape: " << input1->info()->tensor_shape()
857  << " Input2 shape: " << input2->info()->tensor_shape()
858  << " Output shape: " << output->info()->tensor_shape()
859  << " DetectionOutputLayer info: " << detect_info
860  << std::endl);
861 
862  return std::move(func);
863 }
TensorType
Memory type.
Definition: Types.h:38
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
Definition: Error.h:466
#define ARM_COMPUTE_LOG_GRAPH_INFO(x)
Definition: Logger.h:54

◆ create_detection_output_layer< CPPDetectionOutputLayer, CLTargetInfo >()

Definition at line 130 of file CLFunctionsFactory.cpp.

References ARM_COMPUTE_ERROR_ON, and ARM_COMPUTE_LOG_GRAPH_INFO.

Referenced by CLFunctionFactory::create().

131 {
132  validate_node<CLTargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);
133 
134  // Extract IO and info
135  CLTargetInfo::TensorType *input0 = get_backing_tensor<CLTargetInfo>(node.input(0));
136  CLTargetInfo::TensorType *input1 = get_backing_tensor<CLTargetInfo>(node.input(1));
137  CLTargetInfo::TensorType *input2 = get_backing_tensor<CLTargetInfo>(node.input(2));
138  CLTargetInfo::TensorType *output = get_backing_tensor<CLTargetInfo>(node.output(0));
139  const DetectionOutputLayerInfo detect_info = node.detection_output_info();
140 
141  ARM_COMPUTE_ERROR_ON(input0 == nullptr);
142  ARM_COMPUTE_ERROR_ON(input1 == nullptr);
143  ARM_COMPUTE_ERROR_ON(input2 == nullptr);
144  ARM_COMPUTE_ERROR_ON(output == nullptr);
145 
146  // Create and configure function
147  auto func = std::make_unique<CPPDetectionOutputLayer>();
148  func->configure(input0, input1, input2, output, detect_info);
149 
150  // Log info
151  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
152  << node.name()
153  << " Type: " << node.type()
154  << " Target: " << CLTargetInfo::TargetType
155  << " Data Type: " << input0->info()->data_type()
156  << " Input0 shape: " << input0->info()->tensor_shape()
157  << " Input1 shape: " << input1->info()->tensor_shape()
158  << " Input2 shape: " << input2->info()->tensor_shape()
159  << " Output shape: " << output->info()->tensor_shape()
160  << " DetectionOutputLayer info: " << detect_info
161  << std::endl);
162 
163  auto wrap_function = std::make_unique<CPPWrapperFunction>();
164 
165  wrap_function->register_function(std::move(func));
166  wrap_function->register_tensor(input0);
167  wrap_function->register_tensor(input1);
168  wrap_function->register_tensor(input2);
169  wrap_function->register_tensor(output);
170 
171  return std::move(wrap_function);
172 }
TensorType
Memory type.
Definition: Types.h:38
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
Definition: Error.h:466
#define ARM_COMPUTE_LOG_GRAPH_INFO(x)
Definition: Logger.h:54

◆ create_detection_post_process_layer()

std::unique_ptr<IFunction> arm_compute::graph::backends::detail::create_detection_post_process_layer ( DetectionPostProcessLayerNode node)

Create a backend detection post process layer function.

Template Parameters
DetectionPostProcessLayerFunction — Backend detection post process function
TargetInfo — Target-specific information
Parameters
[in] node — Node to create the backend function for
Returns
Backend detection post process layer function

Definition at line 875 of file FunctionHelpers.h.

References ARM_COMPUTE_ERROR_ON, and ARM_COMPUTE_LOG_GRAPH_INFO.

876 {
877  validate_node<TargetInfo>(node, 3 /* expected inputs */, 4 /* expected outputs */);
878 
879  // Extract IO and info
880  typename TargetInfo::TensorType *input0 = get_backing_tensor<TargetInfo>(node.input(0));
881  typename TargetInfo::TensorType *input1 = get_backing_tensor<TargetInfo>(node.input(1));
882  typename TargetInfo::TensorType *input2 = get_backing_tensor<TargetInfo>(node.input(2));
883  typename TargetInfo::TensorType *output0 = get_backing_tensor<TargetInfo>(node.output(0));
884  typename TargetInfo::TensorType *output1 = get_backing_tensor<TargetInfo>(node.output(1));
885  typename TargetInfo::TensorType *output2 = get_backing_tensor<TargetInfo>(node.output(2));
886  typename TargetInfo::TensorType *output3 = get_backing_tensor<TargetInfo>(node.output(3));
887  const DetectionPostProcessLayerInfo detect_info = node.detection_post_process_info();
888 
889  ARM_COMPUTE_ERROR_ON(input0 == nullptr);
890  ARM_COMPUTE_ERROR_ON(input1 == nullptr);
891  ARM_COMPUTE_ERROR_ON(input2 == nullptr);
892  ARM_COMPUTE_ERROR_ON(output0 == nullptr);
893  ARM_COMPUTE_ERROR_ON(output1 == nullptr);
894  ARM_COMPUTE_ERROR_ON(output2 == nullptr);
895  ARM_COMPUTE_ERROR_ON(output3 == nullptr);
896 
897  // Create and configure function
898  auto func = std::make_unique<DetectionPostProcessLayerFunction>();
899  func->configure(input0, input1, input2, output0, output1, output2, output3, detect_info);
900 
901  // Log info
902  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
903  << node.name()
904  << " Type: " << node.type()
905  << " Target: " << TargetInfo::TargetType
906  << " Data Type: " << input0->info()->data_type()
907  << " Input0 shape: " << input0->info()->tensor_shape()
908  << " Input1 shape: " << input1->info()->tensor_shape()
909  << " Input2 shape: " << input2->info()->tensor_shape()
910  << " Output0 shape: " << output0->info()->tensor_shape()
911  << " Output1 shape: " << output1->info()->tensor_shape()
912  << " Output2 shape: " << output2->info()->tensor_shape()
913  << " Output3 shape: " << output3->info()->tensor_shape()
914  << " DetectionPostProcessLayer info: " << detect_info
915  << std::endl);
916 
917  return std::move(func);
918 }
TensorType
Memory type.
Definition: Types.h:38
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
Definition: Error.h:466
#define ARM_COMPUTE_LOG_GRAPH_INFO(x)
Definition: Logger.h:54

◆ create_detection_post_process_layer< CPPDetectionPostProcessLayer, CLTargetInfo >()

Definition at line 174 of file CLFunctionsFactory.cpp.

References ARM_COMPUTE_ERROR_ON, and ARM_COMPUTE_LOG_GRAPH_INFO.

Referenced by CLFunctionFactory::create().

175 {
176  validate_node<CLTargetInfo>(node, 3 /* expected inputs */, 4 /* expected outputs */);
177 
178  // Extract IO and info
179  CLTargetInfo::TensorType *input0 = get_backing_tensor<CLTargetInfo>(node.input(0));
180  CLTargetInfo::TensorType *input1 = get_backing_tensor<CLTargetInfo>(node.input(1));
181  CLTargetInfo::TensorType *input2 = get_backing_tensor<CLTargetInfo>(node.input(2));
182  CLTargetInfo::TensorType *output0 = get_backing_tensor<CLTargetInfo>(node.output(0));
183  CLTargetInfo::TensorType *output1 = get_backing_tensor<CLTargetInfo>(node.output(1));
184  CLTargetInfo::TensorType *output2 = get_backing_tensor<CLTargetInfo>(node.output(2));
185  CLTargetInfo::TensorType *output3 = get_backing_tensor<CLTargetInfo>(node.output(3));
186  const DetectionPostProcessLayerInfo detect_info = node.detection_post_process_info();
187 
188  ARM_COMPUTE_ERROR_ON(input0 == nullptr);
189  ARM_COMPUTE_ERROR_ON(input1 == nullptr);
190  ARM_COMPUTE_ERROR_ON(input2 == nullptr);
191  ARM_COMPUTE_ERROR_ON(output0 == nullptr);
192  ARM_COMPUTE_ERROR_ON(output1 == nullptr);
193  ARM_COMPUTE_ERROR_ON(output2 == nullptr);
194  ARM_COMPUTE_ERROR_ON(output3 == nullptr);
195 
196  // Create and configure function
197  auto func = std::make_unique<CPPDetectionPostProcessLayer>();
198  func->configure(input0, input1, input2, output0, output1, output2, output3, detect_info);
199 
200  // Log info
201  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
202  << node.name()
203  << " Type: " << node.type()
204  << " Target: " << CLTargetInfo::TargetType
205  << " Data Type: " << input0->info()->data_type()
206  << " Input0 shape: " << input0->info()->tensor_shape()
207  << " Input1 shape: " << input1->info()->tensor_shape()
208  << " Input2 shape: " << input2->info()->tensor_shape()
209  << " Output0 shape: " << output0->info()->tensor_shape()
210  << " Output1 shape: " << output1->info()->tensor_shape()
211  << " Output2 shape: " << output2->info()->tensor_shape()
212  << " Output3 shape: " << output3->info()->tensor_shape()
213  << " DetectionPostProcessLayer info: " << detect_info
214  << std::endl);
215 
216  auto wrap_function = std::make_unique<CPPWrapperFunction>();
217 
218  wrap_function->register_function(std::move(func));
219  wrap_function->register_tensor(input0);
220  wrap_function->register_tensor(input1);
221  wrap_function->register_tensor(input2);
222  wrap_function->register_tensor(output0);
223  wrap_function->register_tensor(output1);
224  wrap_function->register_tensor(output2);
225  wrap_function->register_tensor(output3);
226 
227  return std::move(wrap_function);
228 }
TensorType
Memory type.
Definition: Types.h:38
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
Definition: Error.h:466
#define ARM_COMPUTE_LOG_GRAPH_INFO(x)
Definition: Logger.h:54

◆ create_eltwise_layer()

std::unique_ptr<IFunction> arm_compute::graph::backends::detail::create_eltwise_layer ( EltwiseLayerNode node)

Create a backend element-wise operation layer function.

Template Parameters
EltwiseFunctions — Backend element-wise function
TargetInfo — Target-specific information
Parameters
[in] node — Node to create the backend function for
Returns
Backend element-wise operation layer function

Definition at line 930 of file FunctionHelpers.h.

References arm_compute::graph::Add, ARM_COMPUTE_ERROR, ARM_COMPUTE_ERROR_ON, ARM_COMPUTE_LOG_GRAPH_INFO, arm_compute::graph::Div, arm_compute::graph::Max, arm_compute::graph::Mul, and arm_compute::graph::Sub.

931 {
932  validate_node<TargetInfo>(node, 2 /* expected inputs */, 1 /* expected outputs */);
933 
934  // Extract IO and info
935  typename TargetInfo::TensorType *input1 = get_backing_tensor<TargetInfo>(node.input(0));
936  typename TargetInfo::TensorType *input2 = get_backing_tensor<TargetInfo>(node.input(1));
937  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
938  const EltwiseOperation eltwise_op = node.eltwise_operation();
939  const ConvertPolicy convert_policy = node.convert_policy();
940  const ActivationLayerInfo act_info = node.fused_activation();
941  ARM_COMPUTE_ERROR_ON(input1 == nullptr);
942  ARM_COMPUTE_ERROR_ON(input2 == nullptr);
943  ARM_COMPUTE_ERROR_ON(output == nullptr);
944 
945  std::unique_ptr<IFunction> func = nullptr;
946  std::string func_name;
947  if(eltwise_op == EltwiseOperation::Add)
948  {
949  std::tie(func, func_name) = create_named_function<typename EltwiseFunctions::Addition>(
950  std::string("ArithmeticAddition"),
951  input1, input2, output, convert_policy, act_info);
952  }
953  else if(eltwise_op == EltwiseOperation::Sub)
954  {
955  std::tie(func, func_name) = create_named_function<typename EltwiseFunctions::Subtraction>(
956  std::string("ArithmeticSubtraction"),
957  input1, input2, output, convert_policy, act_info);
958  }
959  else if(eltwise_op == EltwiseOperation::Mul)
960  {
961  std::tie(func, func_name) = create_named_function<typename EltwiseFunctions::Multiplication>(
962  std::string("PixelWiseMultiplication"),
963  input1, input2, output, 1.f, convert_policy, node.rounding_policy(), act_info);
964  }
965  else if(eltwise_op == EltwiseOperation::Max)
966  {
967  std::tie(func, func_name) = create_named_function<typename EltwiseFunctions::Maximum>(
968  std::string("ElementwiseMaximum"),
969  input1, input2, output, act_info);
970  }
971  else if(eltwise_op == EltwiseOperation::Div)
972  {
973  std::tie(func, func_name) = create_named_function<typename EltwiseFunctions::Division>(
974  std::string("ArithmeticDivision"),
975  input1, input2, output, act_info);
976  }
977  else
978  {
979  ARM_COMPUTE_ERROR("Unsupported element-wise operation!");
980  }
981 
982  // Log info
983  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
984  << node.name()
985  << " Type: " << node.type()
986  << " Target: " << TargetInfo::TargetType
987  << " Operation: " << func_name
988  << " Data Type: " << input1->info()->data_type()
989  << " Shape: " << input1->info()->tensor_shape()
990  << std::endl);
991 
992  return std::move(func);
993 }
EltwiseOperation
Supported Element-wise operations.
Definition: Types.h:109
#define ARM_COMPUTE_ERROR(msg)
Print the given message then throw an std::runtime_error.
Definition: Error.h:352
TensorType
Memory type.
Definition: Types.h:38
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
Definition: Error.h:466
#define ARM_COMPUTE_LOG_GRAPH_INFO(x)
Definition: Logger.h:54
ConvertPolicy
Policy to handle integer overflow.
Definition: Types.h:391

◆ create_flatten_layer()

std::unique_ptr<IFunction> arm_compute::graph::backends::detail::create_flatten_layer ( FlattenLayerNode node)

Create a backend flatten layer function.

Template Parameters
FlattenLayerFunction — Backend flatten function
TargetInfo — Target-specific information
Parameters
[in] node — Node to create the backend function for
Returns
Backend flatten layer function

Definition at line 1053 of file FunctionHelpers.h.

References ARM_COMPUTE_ERROR_ON, ARM_COMPUTE_LOG_GRAPH_INFO, and arm_compute::test::validation::input.

1054 {
1055  validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1056 
1057  // Extract IO and info
1058  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1059  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1060 
1061  ARM_COMPUTE_ERROR_ON(input == nullptr);
1062  ARM_COMPUTE_ERROR_ON(output == nullptr);
1063 
1064  // Create and configure function
1065  auto func = std::make_unique<FlattenLayerFunction>();
1066  func->configure(input, output);
1067 
1068  // Log info
1069  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1070  << node.name()
1071  << " Type: " << node.type()
1072  << " Target: " << TargetInfo::TargetType
1073  << " Data Type: " << input->info()->data_type()
1074  << " Input shape: " << input->info()->tensor_shape()
1075  << " Output shape: " << output->info()->tensor_shape()
1076  << std::endl);
1077 
1078  return std::move(func);
1079 }
TensorType
Memory type.
Definition: Types.h:38
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
Definition: Error.h:466
#define ARM_COMPUTE_LOG_GRAPH_INFO(x)
Definition: Logger.h:54

◆ create_fully_connected_layer()

std::unique_ptr<IFunction> arm_compute::graph::backends::detail::create_fully_connected_layer ( FullyConnectedLayerNode node,
GraphContext ctx 
)

Create a backend fully connected layer function.

Template Parameters
FullyConnectedLayerFunction — Backend fully-connected function
TargetInfo — Target-specific information
Parameters
[in] node — Node to create the backend function for
[in] ctx — Graph context
Returns
Backend fully connected layer function

Definition at line 1092 of file FunctionHelpers.h.

References ARM_COMPUTE_ERROR_ON, ARM_COMPUTE_LOG_GRAPH_INFO, FullyConnectedLayerInfo::enable_fast_math, arm_compute::graph::Enabled, arm_compute::graph::backends::get_memory_manager(), arm_compute::graph::backends::get_weights_manager(), arm_compute::test::validation::input, and arm_compute::is_data_type_quantized_asymmetric().

1093 {
1094  validate_node<TargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);
1095 
1096  // Extract IO and info
1097  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1098  typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
1099  typename TargetInfo::TensorType *biases = get_backing_tensor<TargetInfo>(node.input(2));
1100  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1101  FullyConnectedLayerInfo fc_info = node.info();
1102  fc_info.enable_fast_math = (node.fast_math_hint() == FastMathHint::Enabled);
1103 
1104  ARM_COMPUTE_ERROR_ON(input == nullptr);
1105  ARM_COMPUTE_ERROR_ON(weights == nullptr);
1106  ARM_COMPUTE_ERROR_ON(output == nullptr);
1107 
1108  // Create and configure function
1109  auto wm = get_weights_manager(ctx, TargetInfo::TargetType);
1110  auto mm = get_memory_manager(ctx, TargetInfo::TargetType);
1111  auto func = std::make_unique<FullyConnectedLayerFunction>(mm, wm.get());
1112  func->configure(input, weights, biases, output, fc_info);
1113 
1114  const bool is_quantized = is_data_type_quantized_asymmetric(input->info()->data_type());
1115 
1116  // Log info
1117  std::ostringstream qss;
1118  if(is_quantized)
1119  {
1120  qss << " Input QuantInfo: " << input->info()->quantization_info()
1121  << " Weights QuantInfo: " << weights->info()->quantization_info()
1122  << " Output QuantInfo: " << output->info()->quantization_info();
1123  }
1124  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1125  << node.name()
1126  << " Type: " << node.type()
1127  << " Target: " << TargetInfo::TargetType
1128  << " Data Type: " << input->info()->data_type()
1129  << qss.str()
1130  << " Input shape: " << input->info()->tensor_shape()
1131  << " Weights shape: " << weights->info()->tensor_shape()
1132  << " Output shape: " << output->info()->tensor_shape()
1133  << std::endl);
1134 
1135  return std::move(func);
1136 }
TensorType
Memory type.
Definition: Types.h:38
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
Definition: Error.h:466
#define ARM_COMPUTE_LOG_GRAPH_INFO(x)
Definition: Logger.h:54
bool is_data_type_quantized_asymmetric(DataType dt)
Check if a given data type is of asymmetric quantized type.
Definition: Utils.h:1003
std::shared_ptr< IMemoryManager > get_memory_manager(GraphContext &ctx, Target target)
Returns the memory manager for a given target.
Definition: Utils.h:89
std::shared_ptr< IWeightsManager > get_weights_manager(GraphContext &ctx, Target target)
Returns the weights manager for a given target.
Definition: Utils.h:102

◆ create_fused_convolution_batch_normalization_layer()

std::unique_ptr<IFunction> arm_compute::graph::backends::detail::create_fused_convolution_batch_normalization_layer ( FusedConvolutionBatchNormalizationNode node,
GraphContext ctx 
)

Create a backend fused convolution batch normalization layer function.

Template Parameters
BatchNormalizationLayerFunction — Backend batch normalization function
TargetInfo — Target-specific information
Parameters
[in] node — Node to create the backend function for
[in] ctx — Graph context
Returns
Backend fused convolution batch normalization layer function

Definition at line 221 of file FunctionHelpers.h.

References ActivationLayerInfo::activation(), ARM_COMPUTE_LOG_GRAPH_INFO, arm_compute::test::validation::conv_info, arm_compute::graph::Enabled, ActivationLayerInfo::enabled(), arm_compute::quantization::epsilon, arm_compute::graph::backends::get_memory_manager(), arm_compute::test::validation::input, arm_compute::test::validation::num_groups, and arm_compute::to_string().

222 {
223  validate_node<TargetInfo>(node, 7 /* expected inputs */, 1 /* expected outputs */);
224 
225  // Extract IO and info
226  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
227  typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
228  typename TargetInfo::TensorType *biases = get_backing_tensor<TargetInfo>(node.input(2));
229  typename TargetInfo::TensorType *mean = get_backing_tensor<TargetInfo>(node.input(3));
230  typename TargetInfo::TensorType *var = get_backing_tensor<TargetInfo>(node.input(4));
231  typename TargetInfo::TensorType *beta = get_backing_tensor<TargetInfo>(node.input(5));
232  typename TargetInfo::TensorType *gamma = get_backing_tensor<TargetInfo>(node.input(6));
233 
234  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
235 
236  const PadStrideInfo conv_info = node.convolution_info();
237  const unsigned int num_groups = node.num_groups();
238  const bool fast_math = node.fast_math_hint() == FastMathHint::Enabled;
239  const ActivationLayerInfo fused_act = node.fused_activation();
240  const float epsilon = node.epsilon();
241 
242  // Create and configure function (we assume that functions have been validated before creation)
243  std::shared_ptr<IMemoryManager> mm = get_memory_manager(ctx, TargetInfo::TargetType);
244  std::unique_ptr<IFunction> func;
245  std::string func_name;
246 
247  using FType = FusedConvolutionBatchNormalizationFunction<TargetInfo, FusedLayerTypes>;
248 
249  // Create and configure function
250  std::tie(func, func_name) = create_named_memory_managed_function<FType>(
251  std::string("FusedConvolutionBatchNormalizationLayer"), mm, input, weights, biases, output, mean, var, beta, gamma, epsilon, conv_info, num_groups, fast_math, fused_act);
252 
253  // Log info
254  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
255  << node.name()
256  << " Type: " << node.type()
257  << " Target: " << TargetInfo::TargetType
258  << " Data Type: " << input->info()->data_type()
259  << " Input shape: " << input->info()->tensor_shape()
260  << " Weights shape: " << weights->info()->tensor_shape()
261  << " Output shape: " << output->info()->tensor_shape()
262  << (fused_act.enabled() ? " " + to_string(fused_act.activation()) : "")
263  << std::endl);
264  return std::move(func);
265 }
TensorType
Memory type.
Definition: Types.h:38
std::string to_string(const GEMMConfigNative &config)
Definition: Utils.cpp:156
#define ARM_COMPUTE_LOG_GRAPH_INFO(x)
Definition: Logger.h:54
const unsigned int num_groups
Definition: Im2Col.cpp:153
std::shared_ptr< IMemoryManager > get_memory_manager(GraphContext &ctx, Target target)
Returns the memory manager for a given target.
Definition: Utils.h:89

◆ create_fused_convolution_with_post_op()

std::unique_ptr<IFunction> arm_compute::graph::backends::detail::create_fused_convolution_with_post_op ( FusedConvolutionWithPostOpNode node,
GraphContext ctx 
)

Create a backend convolution layer function with post operator.

Template Parameters
ConvolutionLayerFunctionsBackend convolution functions
TargetInfoTarget-specific information
Parameters
[in]nodeNode to create the backend function for
[in]ctxGraph context
Returns
Backend convolution layer function

Definition at line 554 of file FunctionHelpers.h.

References arm_compute::experimental::Activation, ARM_COMPUTE_ERROR, ARM_COMPUTE_LOG_GRAPH_INFO, arm_compute::test::validation::conv_info, arm_compute::graph::backends::get_memory_manager(), arm_compute::test::validation::input, arm_compute::is_data_type_quantized_asymmetric(), arm_compute::test::validation::num_groups, arm_compute::test::validation::post_ops, arm_compute::S32, arm_compute::to_string(), and arm_compute::utils::cast::U.

555 {
556  validate_node<TargetInfo>(node, 4 /* expected inputs */, 1 /* expected outputs */);
557 
558  // Extract IO and info
559  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
560  typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
561  typename TargetInfo::TensorType *biases = get_backing_tensor<TargetInfo>(node.input(2));
562  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
563 
564  const bool is_quantized = is_data_type_quantized_asymmetric(input->info()->data_type());
565 
566  if(is_quantized)
567  {
568  biases->info()->set_data_type(DataType::S32);
569  }
570 
571  const PadStrideInfo conv_info = node.convolution_info();
572  const unsigned int num_groups = node.num_groups();
573  const ActivationLayerInfo fused_act = node.fused_activation();
574 
575  experimental::PostOpList<typename TargetInfo::TensorType *> post_ops;
576 
577  auto &post_op_info_list = node.post_op_info_list();
578  for(const auto &post_op_info : post_op_info_list)
579  {
580  switch(post_op_info->type())
581  {
583  {
584  const auto act_info = utils::cast::polymorphic_downcast<const ConvPostOpInfoActivation *>(post_op_info.get());
585  post_ops.template push_back_op<experimental::PostOpAct<typename TargetInfo::TensorType *>>(act_info->_act);
586  break;
587  }
588  case PostOpType::Eltwise_Add:
589  {
590  typename TargetInfo::TensorType *add_input = get_backing_tensor<TargetInfo>(node.input(3));
591  const auto eltwise_info = utils::cast::polymorphic_downcast<const ConvPostOpInfoEltwiseAdd *>(post_op_info.get());
592  post_ops.template push_back_op<experimental::PostOpEltwiseAdd<typename TargetInfo::TensorType *>>(add_input, eltwise_info->_prev_op_dst_pos, eltwise_info->_policy);
593  break;
594  }
595  default:
596  {
597  ARM_COMPUTE_ERROR("Unsupported PostOpType");
598  }
599  }
600  }
601 
602  // Create and configure function (we assume that functions have been validated before creation)
603  std::shared_ptr<IMemoryManager> mm = get_memory_manager(ctx, TargetInfo::TargetType);
604  std::unique_ptr<IFunction> func;
605  std::string func_name;
606 
607  // Fuse convolution with post ops is only supported for conv1x1, which is only implemented as gemmconv2d
608  std::tie(func, func_name) = create_named_memory_managed_function<typename ConvolutionLayerFunctions::GEMMConvolutionLayer>(
609  std::string("GEMMConvolutionLayer"), mm,
610  input, weights, biases, output, conv_info,
611  WeightsInfo(), Size2D(1U, 1U), fused_act, num_groups, post_ops);
612 
613  // Log info
614  std::ostringstream qss;
615  if(is_quantized)
616  {
617  qss << " Input QuantInfo: " << input->info()->quantization_info()
618  << " Weights QuantInfo: " << weights->info()->quantization_info()
619  << " Output QuantInfo: " << output->info()->quantization_info();
620  }
621  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
622  << node.name()
623  << " Type: " << func_name
624  << " Target: " << TargetInfo::TargetType
625  << " Data Type: " << input->info()->data_type()
626  << " Groups: " << num_groups
627  << " Input shape: " << input->info()->tensor_shape()
628  << " Weights shape: " << weights->info()->tensor_shape()
629  << " Output shape: " << output->info()->tensor_shape()
630  << qss.str()
631  << (fused_act.enabled() ? " " + to_string(fused_act.activation()) : "")
632  << std::endl);
633  return std::move(func);
634 }
experimental::PostOpList< ITensorInfo * > post_ops
#define ARM_COMPUTE_ERROR(msg)
Print the given message then throw an std::runtime_error.
Definition: Error.h:352
TensorType
Memory type.
Definition: Types.h:38
std::string to_string(const GEMMConfigNative &config)
Definition: Utils.cpp:156
arm_compute::ActivationLayerInfo::ActivationFunction Activation
Constant TensorID specifying an equivalent of null tensor.
Definition: Types.h:73
#define ARM_COMPUTE_LOG_GRAPH_INFO(x)
Definition: Logger.h:54
const unsigned int num_groups
Definition: Im2Col.cpp:153
bool is_data_type_quantized_asymmetric(DataType dt)
Check if a given data type is of asymmetric quantized type.
Definition: Utils.h:1003
std::shared_ptr< IMemoryManager > get_memory_manager(GraphContext &ctx, Target target)
Returns the memory manager for a given target.
Definition: Utils.h:89

◆ create_fused_depthwise_convolution_batch_normalization_layer()

std::unique_ptr<IFunction> arm_compute::graph::backends::detail::create_fused_depthwise_convolution_batch_normalization_layer ( FusedDepthwiseConvolutionBatchNormalizationNode node,
GraphContext ctx 
)

Create a backend fused depthwise convolution batch normalization layer function.

Template Parameters
FusedLayerTypesFused layer types
TargetInfoTarget-specific information
Parameters
[in]nodeNode to create the backend function for
[in]ctxGraph context
Returns
Backend fused depthwise convolution batch normalization layer function

Definition at line 278 of file FunctionHelpers.h.

References ActivationLayerInfo::activation(), ARM_COMPUTE_LOG_GRAPH_INFO, arm_compute::test::validation::conv_info, ActivationLayerInfo::enabled(), arm_compute::quantization::epsilon, arm_compute::graph::backends::get_memory_manager(), arm_compute::test::validation::input, and arm_compute::to_string().

279 {
280  validate_node<TargetInfo>(node, 7 /* expected inputs */, 1 /* expected outputs */);
281 
282  // Extract IO and info
283  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
284  typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
285  typename TargetInfo::TensorType *biases = get_backing_tensor<TargetInfo>(node.input(2));
286  typename TargetInfo::TensorType *mean = get_backing_tensor<TargetInfo>(node.input(3));
287  typename TargetInfo::TensorType *var = get_backing_tensor<TargetInfo>(node.input(4));
288  typename TargetInfo::TensorType *beta = get_backing_tensor<TargetInfo>(node.input(5));
289  typename TargetInfo::TensorType *gamma = get_backing_tensor<TargetInfo>(node.input(6));
290 
291  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
292 
293  const PadStrideInfo conv_info = node.convolution_info();
294  const unsigned int depth_multiplier = node.depth_multiplier();
295  const ActivationLayerInfo fused_act = node.fused_activation();
296  const float epsilon = node.epsilon();
297 
298  // Create and configure function (we assume that functions have been validated before creation)
299  std::shared_ptr<IMemoryManager> mm = get_memory_manager(ctx, TargetInfo::TargetType);
300  std::unique_ptr<IFunction> func;
301  std::string func_name;
302 
303  using FType = FusedDepthwiseConvolutionBatchNormalizationFunction<TargetInfo, FusedLayerTypes>;
304 
305  // Create and configure function
306  std::tie(func, func_name) = create_named_memory_managed_function<FType>(
307  std::string("FusedDepthwiseConvolutionBatchNormalizationLayer"), mm, input, weights, biases, output, mean, var, beta, gamma, epsilon, conv_info, depth_multiplier, fused_act);
308 
309  // Log info
310  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
311  << node.name()
312  << " Type: " << node.type()
313  << " Target: " << TargetInfo::TargetType
314  << " Data Type: " << input->info()->data_type()
315  << " Input shape: " << input->info()->tensor_shape()
316  << " Weights shape: " << weights->info()->tensor_shape()
317  << " Output shape: " << output->info()->tensor_shape()
318  << (fused_act.enabled() ? " " + to_string(fused_act.activation()) : "")
319  << std::endl);
320  return std::move(func);
321 }
TensorType
Memory type.
Definition: Types.h:38
std::string to_string(const GEMMConfigNative &config)
Definition: Utils.cpp:156
#define ARM_COMPUTE_LOG_GRAPH_INFO(x)
Definition: Logger.h:54
std::shared_ptr< IMemoryManager > get_memory_manager(GraphContext &ctx, Target target)
Returns the memory manager for a given target.
Definition: Utils.h:89

◆ create_generate_proposals_layer()

std::unique_ptr<IFunction> arm_compute::graph::backends::detail::create_generate_proposals_layer ( GenerateProposalsLayerNode node,
GraphContext ctx 
)

Create a backend generate proposals layer function.

Template Parameters
GenerateProposalsLayerFunctionBackend generate proposals function
TargetInfoTarget-specific information
Parameters
[in]nodeNode to create the backend function for
[in]ctxGraph context
Returns
Backend generate proposals layer function

Definition at line 1149 of file FunctionHelpers.h.

References ARM_COMPUTE_ERROR_ON, ARM_COMPUTE_LOG_GRAPH_INFO, arm_compute::graph::backends::get_memory_manager(), and arm_compute::test::validation::info.

1150 {
1151  validate_node<TargetInfo>(node, 3 /* expected inputs */, 3 /* expected outputs */);
1152 
1153  // Extract IO and info
1154  typename TargetInfo::TensorType *scores = get_backing_tensor<TargetInfo>(node.input(0));
1155  typename TargetInfo::TensorType *deltas = get_backing_tensor<TargetInfo>(node.input(1));
1156  typename TargetInfo::TensorType *anchors = get_backing_tensor<TargetInfo>(node.input(2));
1157  typename TargetInfo::TensorType *proposals = get_backing_tensor<TargetInfo>(node.output(0));
1158  typename TargetInfo::TensorType *scores_out = get_backing_tensor<TargetInfo>(node.output(1));
1159  typename TargetInfo::TensorType *num_valid_proposals = get_backing_tensor<TargetInfo>(node.output(2));
1160  const GenerateProposalsInfo info = node.info();
1161 
1162  ARM_COMPUTE_ERROR_ON(scores == nullptr);
1163  ARM_COMPUTE_ERROR_ON(deltas == nullptr);
1164  ARM_COMPUTE_ERROR_ON(anchors == nullptr);
1165  ARM_COMPUTE_ERROR_ON(proposals == nullptr);
1166  ARM_COMPUTE_ERROR_ON(scores_out == nullptr);
1167 
1168  // Create and configure function
1169  auto func = std::make_unique<GenerateProposalsLayerFunction>(get_memory_manager(ctx, TargetInfo::TargetType));
1170  func->configure(scores, deltas, anchors, proposals, scores_out, num_valid_proposals, info);
1171 
1172  // Log info
1173  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.type()
1174  << " Target " << TargetInfo::TargetType
1175  << " Data Type: " << scores->info()->data_type()
1176  << " Scores shape: " << scores->info()->tensor_shape()
1177  << " Deltas shape: " << deltas->info()->tensor_shape()
1178  << " Anchors shape: " << anchors->info()->tensor_shape()
1179  << " Proposals shape: " << proposals->info()->tensor_shape()
1180  << " Num valid proposals shape: " << num_valid_proposals->info()->tensor_shape()
1181  << " Scores Out shape: " << scores_out->info()->tensor_shape()
1182  << std::endl);
1183 
1184  return std::move(func);
1185 }
TensorType
Memory type.
Definition: Types.h:38
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
Definition: Error.h:466
#define ARM_COMPUTE_LOG_GRAPH_INFO(x)
Definition: Logger.h:54
ScaleKernelInfo info(interpolation_policy, default_border_mode, PixelValue(), sampling_policy, false)
std::shared_ptr< IMemoryManager > get_memory_manager(GraphContext &ctx, Target target)
Returns the memory manager for a given target.
Definition: Utils.h:89

◆ create_l2_normalize_layer()

std::unique_ptr<IFunction> arm_compute::graph::backends::detail::create_l2_normalize_layer ( L2NormalizeLayerNode node,
GraphContext ctx 
)

Create a backend l2 normalization layer function.

Template Parameters
NormalizationLayerFunctionBackend normalization function
TargetInfoTarget-specific information
Parameters
[in]nodeNode to create the backend function for
[in]ctxGraph context
Returns
Backend normalization layer function

Definition at line 1198 of file FunctionHelpers.h.

References ARM_COMPUTE_ERROR_ON, ARM_COMPUTE_LOG_GRAPH_INFO, arm_compute::quantization::epsilon, arm_compute::graph::backends::get_memory_manager(), and arm_compute::test::validation::input.

1199 {
1200  validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1201 
1202  // Extract IO and info
1203  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1204  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1205  int axis = node.axis();
1206  float epsilon = node.epsilon();
1207 
1208  ARM_COMPUTE_ERROR_ON(input == nullptr);
1209  ARM_COMPUTE_ERROR_ON(output == nullptr);
1210 
1211  // Create and configure function
1212  auto mm = get_memory_manager(ctx, TargetInfo::TargetType);
1213  auto func = std::make_unique<L2NormalizeLayerFunction>(mm);
1214  func->configure(input, output, axis, epsilon);
1215 
1216  // Log info
1217  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1218  << node.name()
1219  << " Type: " << node.type()
1220  << " Target: " << TargetInfo::TargetType
1221  << " Data Type: " << input->info()->data_type()
1222  << " Input shape: " << input->info()->tensor_shape()
1223  << " Output shape: " << output->info()->tensor_shape()
1224  << " Axis: " << axis
1225  << " Epsilon: " << epsilon
1226  << std::endl);
1227 
1228  return std::move(func);
1229 }
TensorType
Memory type.
Definition: Types.h:38
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
Definition: Error.h:466
#define ARM_COMPUTE_LOG_GRAPH_INFO(x)
Definition: Logger.h:54
std::shared_ptr< IMemoryManager > get_memory_manager(GraphContext &ctx, Target target)
Returns the memory manager for a given target.
Definition: Utils.h:89

◆ create_normalization_layer()

std::unique_ptr<IFunction> arm_compute::graph::backends::detail::create_normalization_layer ( NormalizationLayerNode node,
GraphContext ctx 
)

Create a backend normalization layer function.

Template Parameters
NormalizationLayerFunctionBackend normalization function
TargetInfoTarget-specific information
Parameters
[in]nodeNode to create the backend function for
[in]ctxGraph context
Returns
Backend normalization layer function

Definition at line 1242 of file FunctionHelpers.h.

References ARM_COMPUTE_ERROR_ON, ARM_COMPUTE_LOG_GRAPH_INFO, ARM_COMPUTE_UNUSED, and arm_compute::test::validation::input.

1243 {
1244  ARM_COMPUTE_UNUSED(ctx);
1245 
1246  validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1247 
1248  // Extract IO and info
1249  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1250  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1251  const NormalizationLayerInfo norm_info = node.normalization_info();
1252  ARM_COMPUTE_ERROR_ON(input == nullptr);
1253  ARM_COMPUTE_ERROR_ON(output == nullptr);
1254 
1255  // Create and configure function
1256  auto func = std::make_unique<NormalizationLayerFunction>();
1257  func->configure(input, output, norm_info);
1258 
1259  // Log info
1260  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1261  << node.name()
1262  << " Type: " << node.type()
1263  << " Target: " << TargetInfo::TargetType
1264  << " Data Type: " << input->info()->data_type()
1265  << " Input shape: " << input->info()->tensor_shape()
1266  << " Output shape: " << output->info()->tensor_shape()
1267  << " Normalization info: " << norm_info.type()
1268  << std::endl);
1269 
1270  return std::move(func);
1271 }
TensorType
Memory type.
Definition: Types.h:38
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
Definition: Error.h:466
#define ARM_COMPUTE_LOG_GRAPH_INFO(x)
Definition: Logger.h:54
#define ARM_COMPUTE_UNUSED(...)
To avoid unused variables warnings.
Definition: Error.h:152

◆ create_normalization_layer< NENormalizationLayer, NETargetInfo >()

Definition at line 93 of file NEFunctionFactory.cpp.

References ARM_COMPUTE_ERROR_ON, ARM_COMPUTE_LOG_GRAPH_INFO, arm_compute::graph::backends::get_memory_manager(), and arm_compute::test::validation::input.

Referenced by NEFunctionFactory::create().

94 {
95  validate_node<NETargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
96 
97  // Extract IO and info
98  NETargetInfo::TensorType *input = get_backing_tensor<NETargetInfo>(node.input(0));
99  NETargetInfo::TensorType *output = get_backing_tensor<NETargetInfo>(node.output(0));
100  const NormalizationLayerInfo norm_info = node.normalization_info();
101  ARM_COMPUTE_ERROR_ON(input == nullptr);
102  ARM_COMPUTE_ERROR_ON(output == nullptr);
103 
104  // Create and configure function
105  auto func = std::make_unique<NENormalizationLayer>(get_memory_manager(ctx, NETargetInfo::TargetType));
106  func->configure(input, output, norm_info);
107 
108  // Log info
109  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
110  << node.name()
111  << " Type: " << node.type()
112  << " Target: " << NETargetInfo::TargetType
113  << " Data Type: " << input->info()->data_type()
114  << " Input shape: " << input->info()->tensor_shape()
115  << " Output shape: " << output->info()->tensor_shape()
116  << " Normalization info: " << norm_info.type()
117  << std::endl);
118 
119  return std::move(func);
120 }
TensorType
Memory type.
Definition: Types.h:38
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
Definition: Error.h:466
#define ARM_COMPUTE_LOG_GRAPH_INFO(x)
Definition: Logger.h:54
std::shared_ptr< IMemoryManager > get_memory_manager(GraphContext &ctx, Target target)
Returns the memory manager for a given target.
Definition: Utils.h:89

◆ create_normalize_planar_yuv_layer()

std::unique_ptr<IFunction> arm_compute::graph::backends::detail::create_normalize_planar_yuv_layer ( NormalizePlanarYUVLayerNode node)

Create a backend normalize planar YUV layer function.

Template Parameters
NormalizePlanarYUVLayerFunctionBackend normalize planar YUV function
TargetInfoTarget-specific information
Parameters
[in]nodeNode to create the backend function for
Returns
Backend normalize planar YUV layer function

Definition at line 1283 of file FunctionHelpers.h.

References ARM_COMPUTE_ERROR_ON, ARM_COMPUTE_LOG_GRAPH_INFO, and arm_compute::test::validation::input.

1284 {
1285  validate_node<TargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);
1286 
1287  // Extract IO and info
1288  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1289  typename TargetInfo::TensorType *mean = get_backing_tensor<TargetInfo>(node.input(1));
1290  typename TargetInfo::TensorType *std = get_backing_tensor<TargetInfo>(node.input(2));
1291  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1292  ARM_COMPUTE_ERROR_ON(input == nullptr);
1293  ARM_COMPUTE_ERROR_ON(mean == nullptr);
1294  ARM_COMPUTE_ERROR_ON(std == nullptr);
1295  ARM_COMPUTE_ERROR_ON(output == nullptr);
1296 
1297  // Create and configure function
1298  auto func = std::make_unique<NormalizePlanarYUVLayerFunction>();
1299  func->configure(input, output, mean, std);
1300 
1301  // Log info
1302  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1303  << node.name()
1304  << " Type: " << node.type()
1305  << " Target: " << TargetInfo::TargetType
1306  << " Data Type: " << input->info()->data_type()
1307  << " Shape: " << input->info()->tensor_shape()
1308  << std::endl);
1309 
1310  return std::move(func);
1311 }
TensorType
Memory type.
Definition: Types.h:38
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
Definition: Error.h:466
#define ARM_COMPUTE_LOG_GRAPH_INFO(x)
Definition: Logger.h:54

◆ create_pad_layer()

std::unique_ptr<IFunction> arm_compute::graph::backends::detail::create_pad_layer ( PadLayerNode node)

Create a backend pad layer function.

Template Parameters
PadLayerFunctionBackend pad function
TargetInfoTarget-specific information
Parameters
[in]nodeNode to create the backend function for
Returns
Backend pad layer function

Definition at line 1323 of file FunctionHelpers.h.

References ARM_COMPUTE_ERROR_ON, ARM_COMPUTE_LOG_GRAPH_INFO, and arm_compute::test::validation::input.

1324 {
1325  validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1326 
1327  // Extract IO and info
1328  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1329  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1330  const PaddingList &padding = node.padding();
1331  const PixelValue pad_value = node.pad_value();
1332  ARM_COMPUTE_ERROR_ON(input == nullptr);
1333  ARM_COMPUTE_ERROR_ON(output == nullptr);
1334 
1335  // Create and configure function
1336  auto func = std::make_unique<PadLayerFunction>();
1337  func->configure(input, output, padding, pad_value);
1338 
1339  // Log info
1340  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1341  << node.name()
1342  << " Type: " << node.type()
1343  << " Target: " << TargetInfo::TargetType
1344  << " Data Type: " << input->info()->data_type()
1345  << " Input shape: " << input->info()->tensor_shape()
1346  << " Output shape: " << output->info()->tensor_shape()
1347  << std::endl);
1348 
1349  return std::move(func);
1350 }
std::vector< PaddingInfo > PaddingList
List of padding information.
Definition: Types.h:440
TensorType
Memory type.
Definition: Types.h:38
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
Definition: Error.h:466
#define ARM_COMPUTE_LOG_GRAPH_INFO(x)
Definition: Logger.h:54

◆ create_permute_layer()

std::unique_ptr<IFunction> arm_compute::graph::backends::detail::create_permute_layer ( PermuteLayerNode node)

Create a backend permute layer function.

Template Parameters
PermuteLayerFunctionBackend permute function
TargetInfoTarget-specific information
Parameters
[in]nodeNode to create the backend function for
Returns
Backend permute layer function

Definition at line 1362 of file FunctionHelpers.h.

References ARM_COMPUTE_ERROR_ON, ARM_COMPUTE_LOG_GRAPH_INFO, and arm_compute::test::validation::input.

1363 {
1364  validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1365 
1366  // Extract IO and info
1367  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1368  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1369  const PermutationVector &perm = node.permutation_vector();
1370  ARM_COMPUTE_ERROR_ON(input == nullptr);
1371  ARM_COMPUTE_ERROR_ON(output == nullptr);
1372 
1373  // Create and configure function
1374  auto func = std::make_unique<PermuteLayerFunction>();
1375  func->configure(input, output, perm);
1376 
1377  // Log info
1378  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1379  << node.name()
1380  << " Type: " << node.type()
1381  << " Target: " << TargetInfo::TargetType
1382  << " Data Type: " << input->info()->data_type()
1383  << " Input shape: " << input->info()->tensor_shape()
1384  << " Output shape: " << output->info()->tensor_shape()
1385  << " Permutation vector: " << perm
1386  << std::endl);
1387 
1388  return std::move(func);
1389 }
TensorType
Memory type.
Definition: Types.h:38
Strides PermutationVector
Permutation vector.
Definition: Types.h:51
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
Definition: Error.h:466
#define ARM_COMPUTE_LOG_GRAPH_INFO(x)
Definition: Logger.h:54

◆ create_pooling_layer()

std::unique_ptr<IFunction> arm_compute::graph::backends::detail::create_pooling_layer ( PoolingLayerNode node)

Create a backend pooling layer function.

Template Parameters
PoolingLayerFunctionBackend pooling function
TargetInfoTarget-specific information
Parameters
[in]nodeNode to create the backend function for
Returns
Backend pooling layer function

Definition at line 1401 of file FunctionHelpers.h.

References ARM_COMPUTE_ERROR_ON, ARM_COMPUTE_LOG_GRAPH_INFO, and arm_compute::test::validation::input.

1402 {
1403  validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1404 
1405  // Extract IO and info
1406  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1407  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1408  const PoolingLayerInfo pool_info = node.pooling_info();
1409  ARM_COMPUTE_ERROR_ON(input == nullptr);
1410  ARM_COMPUTE_ERROR_ON(output == nullptr);
1411 
1412  // Create and configure function
1413  auto func = std::make_unique<PoolingLayerFunction>();
1414  func->configure(input, output, pool_info);
1415 
1416  // Log info
1417  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1418  << node.name()
1419  << " Type: " << node.type()
1420  << " Target: " << TargetInfo::TargetType
1421  << " Data Type: " << input->info()->data_type()
1422  << " Input shape: " << input->info()->tensor_shape()
1423  << " Output shape: " << output->info()->tensor_shape()
1424  << " Pooling info: " << pool_info.pool_type
1425  << std::endl);
1426 
1427  return std::move(func);
1428 }
TensorType
Memory type.
Definition: Types.h:38
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
Definition: Error.h:466
#define ARM_COMPUTE_LOG_GRAPH_INFO(x)
Definition: Logger.h:54

◆ create_prelu_layer()

std::unique_ptr<IFunction> arm_compute::graph::backends::detail::create_prelu_layer ( PReluLayerNode node)

Create a backend PRelu layer function.

Template Parameters
PReluFunctionBackend PRelu function
TargetInfoTarget-specific information
Parameters
[in]nodeNode to create the backend function for
Returns
Backend PRelu layer function

Definition at line 1440 of file FunctionHelpers.h.

References ARM_COMPUTE_ERROR_ON, ARM_COMPUTE_LOG_GRAPH_INFO, and arm_compute::test::validation::input.

1441 {
1442  validate_node<TargetInfo>(node, 2 /* expected inputs */, 1 /* expected outputs */);
1443 
1444  // Extract IO and info
1445  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1446  typename TargetInfo::TensorType *alpha = get_backing_tensor<TargetInfo>(node.input(1));
1447  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1448  ARM_COMPUTE_ERROR_ON(input == nullptr || alpha == nullptr);
1449  ARM_COMPUTE_ERROR_ON(output == nullptr);
1450 
1451  // Create and configure function
1452  auto func = std::make_unique<PReluFunction>();
1453  func->configure(input, alpha, output);
1454 
1455  // Log info
1456  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1457  << node.name()
1458  << " Type: " << node.type()
1459  << " Target: " << TargetInfo::TargetType
1460  << " Data Type: " << input->info()->data_type()
1461  << " Input shape: " << input->info()->tensor_shape()
1462  << " Output shape: " << output->info()->tensor_shape()
1463  << std::endl);
1464 
1465  return std::move(func);
1466 }
TensorType
Memory type.
Definition: Types.h:38
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
Definition: Error.h:466
#define ARM_COMPUTE_LOG_GRAPH_INFO(x)
Definition: Logger.h:54

◆ create_print_layer()

std::unique_ptr<IFunction> arm_compute::graph::backends::detail::create_print_layer ( PrintLayerNode node)

Create a backend print layer function.

Template Parameters
TargetInfoTarget-specific information
Parameters
[in]nodeNode to create the backend function for
Returns
Backend print layer function

Definition at line 1477 of file FunctionHelpers.h.

References ARM_COMPUTE_ERROR_ON, ARM_COMPUTE_LOG_GRAPH_INFO, ARM_COMPUTE_UNUSED, and arm_compute::test::validation::input.

1478 {
1479  validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1480 
1481  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1482  ARM_COMPUTE_ERROR_ON(input == nullptr);
1483  ARM_COMPUTE_UNUSED(input);
1484 
1485  // Log info
1486  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1487  << node.name()
1488  << " Type: " << node.type()
1489  << " Target: " << TargetInfo::TargetType
1490  << " Data Type: " << input->info()->data_type()
1491  << " Input shape: " << input->info()->tensor_shape()
1492  << std::endl);
1493 
1494  return nullptr;
1495 }
TensorType
Memory type.
Definition: Types.h:38
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
Definition: Error.h:466
#define ARM_COMPUTE_LOG_GRAPH_INFO(x)
Definition: Logger.h:54
#define ARM_COMPUTE_UNUSED(...)
To avoid unused variables warnings.
Definition: Error.h:152

◆ create_priorbox_layer()

std::unique_ptr<IFunction> arm_compute::graph::backends::detail::create_priorbox_layer ( PriorBoxLayerNode node)

Create a backend priorbox layer function.

Template Parameters
PriorBoxLayerFunctionBackend priorbox function
TargetInfoTarget-specific information
Parameters
[in]nodeNode to create the backend function for
Returns
Backend priorbox layer function

Definition at line 1507 of file FunctionHelpers.h.

References ARM_COMPUTE_ERROR_ON, and ARM_COMPUTE_LOG_GRAPH_INFO.

1508 {
1509  validate_node<TargetInfo>(node, 2 /* expected inputs */, 1 /* expected outputs */);
1510 
1511  // Extract IO and info
1512  typename TargetInfo::TensorType *input0 = get_backing_tensor<TargetInfo>(node.input(0));
1513  typename TargetInfo::TensorType *input1 = get_backing_tensor<TargetInfo>(node.input(1));
1514  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1515  const PriorBoxLayerInfo prior_info = node.priorbox_info();
1516  ARM_COMPUTE_ERROR_ON(input0 == nullptr);
1517  ARM_COMPUTE_ERROR_ON(input1 == nullptr);
1518  ARM_COMPUTE_ERROR_ON(output == nullptr);
1519 
1520  // Create and configure function
1521  auto func = std::make_unique<PriorBoxLayerFunction>();
1522  func->configure(input0, input1, output, prior_info);
1523 
1524  // Log info
1525  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1526  << node.name()
1527  << " Type: " << node.type()
1528  << " Target: " << TargetInfo::TargetType
1529  << " Data Type: " << input0->info()->data_type()
1530  << " Input0 shape: " << input0->info()->tensor_shape()
1531  << " Input1 shape: " << input1->info()->tensor_shape()
1532  << " Output shape: " << output->info()->tensor_shape()
1533  << " PriorBoxLayer info: " << prior_info
1534  << std::endl);
1535 
1536  return std::move(func);
1537 }
TensorType
Memory type.
Definition: Types.h:38
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
Definition: Error.h:466
#define ARM_COMPUTE_LOG_GRAPH_INFO(x)
Definition: Logger.h:54

◆ create_quantization_layer()

std::unique_ptr<IFunction> arm_compute::graph::backends::detail::create_quantization_layer ( QuantizationLayerNode node)

Create a backend quantization layer function.

Template Parameters
QuantizationLayerFunction - Backend quantization function
TargetInfo - Target-specific information
Parameters
[in] node - Node to create the backend function for
Returns
Backend quantization layer function

Definition at line 1549 of file FunctionHelpers.h.

References ARM_COMPUTE_ERROR_ON, ARM_COMPUTE_LOG_GRAPH_INFO, and arm_compute::test::validation::input.

1550 {
1551  validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1552 
1553  // Extract IO and info
1554  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1555  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1556  ARM_COMPUTE_ERROR_ON(input == nullptr);
1557  ARM_COMPUTE_ERROR_ON(output == nullptr);
1558 
1559  // Create and configure function
1560  auto func = std::make_unique<QuantizationLayerFunction>();
1561  func->configure(input, output);
1562 
1563  // Log info
1564  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1565  << node.name()
1566  << " Type: " << node.type()
1567  << " Target: " << TargetInfo::TargetType
1568  << " Data Type: " << input->info()->data_type()
1569  << " Input shape: " << input->info()->tensor_shape()
1570  << " Output shape: " << output->info()->tensor_shape()
1571  << std::endl);
1572 
1573  return std::move(func);
1574 }
TensorType
Memory type.
Definition: Types.h:38
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
Definition: Error.h:466
#define ARM_COMPUTE_LOG_GRAPH_INFO(x)
Definition: Logger.h:54

◆ create_reduction_operation_layer()

std::unique_ptr<IFunction> arm_compute::graph::backends::detail::create_reduction_operation_layer ( ReductionLayerNode node,
GraphContext ctx 
)

Create a backend reduction operation layer function.

Template Parameters
ReductionOperationFunction - Backend reduction operation function
TargetInfo - Target-specific information
Parameters
[in] node - Node to create the backend function for
[in] ctx - Graph context
Returns
Backend reduction operation layer function

Definition at line 1587 of file FunctionHelpers.h.

References ARM_COMPUTE_ERROR_ON, ARM_COMPUTE_LOG_GRAPH_INFO, arm_compute::graph::backends::get_memory_manager(), and arm_compute::test::validation::input.

1588 {
1589  validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1590 
1591  // Extract IO and info
1592  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1593  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1594  ReductionOperation op = node.op();
1595  int axis = node.axis();
1596  bool keep_dims = node.keep_dims();
1597  ARM_COMPUTE_ERROR_ON(input == nullptr);
1598  ARM_COMPUTE_ERROR_ON(output == nullptr);
1599 
1600  // Create and configure function
1601  auto func = std::make_unique<ReductionOperationFunction>(get_memory_manager(ctx, TargetInfo::TargetType));
1602  func->configure(input, output, axis, op, keep_dims);
1603 
1604  // Log info
1605  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1606  << node.name()
1607  << " Type: " << node.type()
1608  << " Target: " << TargetInfo::TargetType
1609  << " Data Type: " << input->info()->data_type()
1610  << " Input shape: " << input->info()->tensor_shape()
1611  << " Output shape: " << output->info()->tensor_shape()
1612  << " Operation: " << op
1613  << " Axis: " << axis
1614  << " Keep dimensions:" << keep_dims
1615  << std::endl);
1616 
1617  return std::move(func);
1618 }
ReductionOperation
Available reduction operations.
Definition: Types.h:463
TensorType
Memory type.
Definition: Types.h:38
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
Definition: Error.h:466
#define ARM_COMPUTE_LOG_GRAPH_INFO(x)
Definition: Logger.h:54
std::shared_ptr< IMemoryManager > get_memory_manager(GraphContext &ctx, Target target)
Returns the memory manager for a given target.
Definition: Utils.h:89

◆ create_reorg_layer()

std::unique_ptr<IFunction> arm_compute::graph::backends::detail::create_reorg_layer ( ReorgLayerNode node)

Create a backend reorg layer function.

Template Parameters
ReorgLayerFunction - Backend reorg function
TargetInfo - Target-specific information
Parameters
[in] node - Node to create the backend function for
Returns
Backend reorg layer function

Definition at line 1630 of file FunctionHelpers.h.

References ARM_COMPUTE_ERROR_ON, ARM_COMPUTE_LOG_GRAPH_INFO, and arm_compute::test::validation::input.

1631 {
1632  validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1633 
1634  // Extract IO and info
1635  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1636  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1637  ARM_COMPUTE_ERROR_ON(input == nullptr);
1638  ARM_COMPUTE_ERROR_ON(output == nullptr);
1639 
1640  // Create and configure function
1641  auto func = std::make_unique<ReorgLayerFunction>();
1642  func->configure(input, output, node.stride());
1643 
1644  // Log info
1645  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1646  << node.name()
1647  << " Type: " << node.type()
1648  << " Target: " << TargetInfo::TargetType
1649  << " Data Type: " << input->info()->data_type()
1650  << " Input shape: " << input->info()->tensor_shape()
1651  << " Output shape: " << output->info()->tensor_shape()
1652  << std::endl);
1653 
1654  return std::move(func);
1655 }
TensorType
Memory type.
Definition: Types.h:38
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
Definition: Error.h:466
#define ARM_COMPUTE_LOG_GRAPH_INFO(x)
Definition: Logger.h:54

◆ create_reshape_layer()

std::unique_ptr<IFunction> arm_compute::graph::backends::detail::create_reshape_layer ( ReshapeLayerNode node)

Create a backend reshape layer function.

Template Parameters
ReshapeLayerFunction - Backend reshape function
TargetInfo - Target-specific information
Parameters
[in] node - Node to create the backend function for
Returns
Backend reshape layer function

Definition at line 1667 of file FunctionHelpers.h.

References ARM_COMPUTE_ERROR_ON, ARM_COMPUTE_LOG_GRAPH_INFO, and arm_compute::test::validation::input.

1668 {
1669  validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1670 
1671  // Extract IO and info
1672  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1673  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1674  ARM_COMPUTE_ERROR_ON(input == nullptr);
1675  ARM_COMPUTE_ERROR_ON(output == nullptr);
1676 
1677  // Create and configure function
1678  auto func = std::make_unique<ReshapeLayerFunction>();
1679  func->configure(input, output);
1680 
1681  // Log info
1682  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1683  << node.name()
1684  << " Type: " << node.type()
1685  << " Target: " << TargetInfo::TargetType
1686  << " Data Type: " << input->info()->data_type()
1687  << " Input shape: " << input->info()->tensor_shape()
1688  << " Output shape: " << output->info()->tensor_shape()
1689  << std::endl);
1690 
1691  return std::move(func);
1692 }
TensorType
Memory type.
Definition: Types.h:38
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
Definition: Error.h:466
#define ARM_COMPUTE_LOG_GRAPH_INFO(x)
Definition: Logger.h:54

◆ create_resize_layer()

std::unique_ptr<IFunction> arm_compute::graph::backends::detail::create_resize_layer ( ResizeLayerNode node)

Create a backend resize layer function.

Template Parameters
ResizeLayerFunction - Backend resize function
TargetInfo - Target-specific information
Parameters
[in] node - Node to create the backend function for
Returns
Backend resize layer function

Definition at line 1704 of file FunctionHelpers.h.

References ARM_COMPUTE_ERROR_ON, ARM_COMPUTE_LOG_GRAPH_INFO, arm_compute::CENTER, arm_compute::CONSTANT, and arm_compute::test::validation::input.

1705 {
1706  validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1707 
1708  // Extract IO and info
1709  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1710  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1711  ARM_COMPUTE_ERROR_ON(input == nullptr);
1712  ARM_COMPUTE_ERROR_ON(output == nullptr);
1713  const InterpolationPolicy policy = node.policy();
1714 
1715  // Create and configure function
1716  auto func = std::make_unique<ResizeLayerFunction>();
1717  func->configure(input, output, ScaleKernelInfo{ policy, BorderMode::CONSTANT, PixelValue(), SamplingPolicy::CENTER, false, false });
1718 
1719  // Log info
1720  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1721  << node.name()
1722  << " Type: " << node.type()
1723  << " Target: " << TargetInfo::TargetType
1724  << " Data Type: " << input->info()->data_type()
1725  << " Input shape: " << input->info()->tensor_shape()
1726  << " Output shape: " << output->info()->tensor_shape()
1727  << " Interpolation: " << policy
1728  << std::endl);
1729 
1730  return std::move(func);
1731 }
InterpolationPolicy
Interpolation method.
Definition: Types.h:398
TensorType
Memory type.
Definition: Types.h:38
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
Definition: Error.h:466
#define ARM_COMPUTE_LOG_GRAPH_INFO(x)
Definition: Logger.h:54

◆ create_roi_align_layer()

std::unique_ptr<IFunction> arm_compute::graph::backends::detail::create_roi_align_layer ( ROIAlignLayerNode node)

Create a backend ROI align layer function.

Template Parameters
ROIAlignLayerFunction - ROI Align function
TargetInfo - Target-specific information
Parameters
[in] node - Node to create the backend function for
Returns
ROI Align layer function

Definition at line 1743 of file FunctionHelpers.h.

References ARM_COMPUTE_ERROR_ON, ARM_COMPUTE_LOG_GRAPH_INFO, arm_compute::test::validation::input, ROIPoolingLayerInfo::pooled_height(), and ROIPoolingLayerInfo::pooled_width().

1744 {
1745  validate_node<TargetInfo>(node, 2 /* expected inputs */, 1 /* expected outputs */);
1746 
1747  // Extract IO and info
1748  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1749  typename TargetInfo::TensorType *rois = get_backing_tensor<TargetInfo>(node.input(1));
1750  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1751  ARM_COMPUTE_ERROR_ON(input == nullptr);
1752  ARM_COMPUTE_ERROR_ON(output == nullptr);
1753  ARM_COMPUTE_ERROR_ON(rois == nullptr);
1754 
1755  const ROIPoolingLayerInfo pool_info = node.pooling_info();
1756 
1757  // Create and configure function
1758  auto func = std::make_unique<ROIAlignLayerFunction>();
1759 
1760  func->configure(input, rois, output, pool_info);
1761 
1762  // Log info
1763  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1764  << node.name()
1765  << " Type: " << node.type()
1766  << " Target: " << TargetInfo::TargetType
1767  << " Data Type: " << input->info()->data_type()
1768  << " Input shape: " << input->info()->tensor_shape()
1769  << " Output shape: " << output->info()->tensor_shape()
1770  << " ROIs shape: " << rois->info()->tensor_shape()
1771  << " ROIPooling width: " << pool_info.pooled_width()
1772  << " ROIPooling height: " << pool_info.pooled_height()
1773  << std::endl);
1774 
1775  return std::move(func);
1776 }
TensorType
Memory type.
Definition: Types.h:38
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
Definition: Error.h:466
#define ARM_COMPUTE_LOG_GRAPH_INFO(x)
Definition: Logger.h:54

◆ create_slice_layer()

std::unique_ptr<IFunction> arm_compute::graph::backends::detail::create_slice_layer ( SliceLayerNode node)

Create a backend slice layer function.

Template Parameters
SliceLayerFunction - Backend slice function
TargetInfo - Target-specific information
Parameters
[in] node - Node to create the backend function for
Returns
Backend slice layer function

Definition at line 1788 of file FunctionHelpers.h.

References ARM_COMPUTE_ERROR_ON, ARM_COMPUTE_LOG_GRAPH_INFO, and arm_compute::test::validation::input.

1789 {
1790  validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1791 
1792  // Extract IO and info
1793  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1794  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1795  ARM_COMPUTE_ERROR_ON(input == nullptr);
1796  ARM_COMPUTE_ERROR_ON(output == nullptr);
1797 
1798  // Create and configure function
1799  auto func = std::make_unique<SliceLayerFunction>();
1800  func->configure(input, output, node.starts(), node.ends());
1801 
1802  // Log info
1803  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1804  << node.name()
1805  << " Type: " << node.type()
1806  << " Target: " << TargetInfo::TargetType
1807  << " Data Type: " << input->info()->data_type()
1808  << " Input shape: " << input->info()->tensor_shape()
1809  << " Output shape: " << output->info()->tensor_shape()
1810  << std::endl);
1811 
1812  return std::move(func);
1813 }
TensorType
Memory type.
Definition: Types.h:38
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
Definition: Error.h:466
#define ARM_COMPUTE_LOG_GRAPH_INFO(x)
Definition: Logger.h:54

◆ create_softmax_layer()

std::unique_ptr<IFunction> arm_compute::graph::backends::detail::create_softmax_layer ( SoftmaxLayerNode node,
GraphContext ctx 
)

Create a backend softmax layer function.

Template Parameters
SoftmaxLayerFunction - Backend softmax function
TargetInfo - Target-specific information
Parameters
[in] node - Node to create the backend function for
[in] ctx - Graph context
Returns
Backend softmax layer function

Definition at line 1826 of file FunctionHelpers.h.

References ARM_COMPUTE_ERROR_ON, ARM_COMPUTE_LOG_GRAPH_INFO, arm_compute::graph::backends::get_memory_manager(), and arm_compute::test::validation::input.

1827 {
1828  validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1829 
1830  // Extract IO and info
1831  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1832  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1833  const float beta = node.beta();
1834  ARM_COMPUTE_ERROR_ON(input == nullptr);
1835  ARM_COMPUTE_ERROR_ON(output == nullptr);
1836 
1837  // Create and configure function
1838  auto func = std::make_unique<SoftmaxLayerFunction>(get_memory_manager(ctx, TargetInfo::TargetType));
1839  func->configure(input, output, beta);
1840 
1841  // Log info
1842  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1843  << node.name()
1844  << " Type: " << node.type()
1845  << " Target: " << TargetInfo::TargetType
1846  << " Data Type: " << input->info()->data_type()
1847  << " Input shape: " << input->info()->tensor_shape()
1848  << " Output shape: " << output->info()->tensor_shape()
1849  << std::endl);
1850 
1851  return std::move(func);
1852 }
TensorType
Memory type.
Definition: Types.h:38
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
Definition: Error.h:466
#define ARM_COMPUTE_LOG_GRAPH_INFO(x)
Definition: Logger.h:54
std::shared_ptr< IMemoryManager > get_memory_manager(GraphContext &ctx, Target target)
Returns the memory manager for a given target.
Definition: Utils.h:89

◆ create_stack_layer()

std::unique_ptr<arm_compute::IFunction> arm_compute::graph::backends::detail::create_stack_layer ( StackLayerNode node)

Create a backend layer stack function.

Template Parameters
StackLayerFunction - Backend stack function
TargetInfo - Target-specific information
Parameters
[in] node - Node to create the backend function for
Returns
Backend stack layer function

Definition at line 1864 of file FunctionHelpers.h.

References ARM_COMPUTE_ERROR_ON, ARM_COMPUTE_LOG_GRAPH_INFO, ARM_COMPUTE_LOG_GRAPH_VERBOSE, StackLayerNode::axis(), INode::id(), INode::input(), INode::name(), INode::num_inputs(), INode::num_outputs(), INode::output(), and StackLayerNode::type().

1865 {
1866  ARM_COMPUTE_LOG_GRAPH_VERBOSE("Creating Stack node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
1867  ARM_COMPUTE_ERROR_ON(node.num_outputs() != 1);
1868 
1869  // Extract IO and info
1870  std::vector<typename TargetInfo::TensorType *> inputs;
1871  for(unsigned int i = 0; i < node.num_inputs(); ++i)
1872  {
1873  inputs.push_back(get_backing_tensor<TargetInfo>(node.input(i)));
1874  }
1875  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1876  const int axis = node.axis();
1877 
1878  // Create and configure function
1879  auto func = std::make_unique<StackLayerFunction>();
1880  func->configure(inputs, axis, output);
1881 
1882  // Log info
1883  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1884  << node.name()
1885  << " Type: " << node.type()
1886  << " Target: " << TargetInfo::TargetType
1887  << " Data Type: " << output->info()->data_type()
1888  << " Inputs shape: " << inputs[0]->info()->tensor_shape()
1889  << " Output shape: " << output->info()->tensor_shape()
1890  << " Num Inputs: " << inputs.size()
1891  << " Axis: " << axis
1892  << std::endl);
1893 
1894  return std::move(func);
1895 }
TensorType
Memory type.
Definition: Types.h:38
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
Definition: Error.h:466
#define ARM_COMPUTE_LOG_GRAPH_INFO(x)
Definition: Logger.h:54
#define ARM_COMPUTE_LOG_GRAPH_VERBOSE(x)
Definition: Logger.h:50

◆ create_strided_slice_layer()

std::unique_ptr<IFunction> arm_compute::graph::backends::detail::create_strided_slice_layer ( StridedSliceLayerNode node)

Create a backend strided slice layer function.

Template Parameters
StridedSliceLayerFunction - Backend strided slice function
TargetInfo - Target-specific information
Parameters
[in] node - Node to create the backend function for
Returns
Backend strided slice layer function

Definition at line 1907 of file FunctionHelpers.h.

References ARM_COMPUTE_ERROR_ON, ARM_COMPUTE_LOG_GRAPH_INFO, StridedSliceLayerInfo::begin_mask(), StridedSliceLayerInfo::end_mask(), arm_compute::test::validation::info, arm_compute::test::validation::input, and StridedSliceLayerInfo::shrink_axis_mask().

1908 {
1909  validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1910 
1911  // Extract IO and info
1912  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1913  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1914  Coordinates starts = node.starts();
1915  Coordinates ends = node.ends();
1916  BiStrides strides = node.strides();
1917  StridedSliceLayerInfo info = node.strided_slice_info();
1918 
1919  ARM_COMPUTE_ERROR_ON(input == nullptr);
1920  ARM_COMPUTE_ERROR_ON(output == nullptr);
1921 
1922  // Create and configure function
1923  auto func = std::make_unique<StridedSliceLayerFunction>();
1924  func->configure(input, output, starts, ends, strides, info.begin_mask(), info.end_mask(), info.shrink_axis_mask());
1925 
1926  // Log info
1927  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1928  << node.name()
1929  << " Type: " << node.type()
1930  << " Target: " << TargetInfo::TargetType
1931  << " Data Type: " << input->info()->data_type()
1932  << " Input shape: " << input->info()->tensor_shape()
1933  << " Output shape: " << output->info()->tensor_shape()
1934  << std::endl);
1935 
1936  return std::move(func);
1937 }
TensorType
Memory type.
Definition: Types.h:38
Coordinates BiStrides
Bidirectional strides.
Definition: Types.h:53
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
Definition: Error.h:466
#define ARM_COMPUTE_LOG_GRAPH_INFO(x)
Definition: Logger.h:54
ScaleKernelInfo info(interpolation_policy, default_border_mode, PixelValue(), sampling_policy, false)

◆ create_unary_eltwise_layer()

std::unique_ptr<IFunction> arm_compute::graph::backends::detail::create_unary_eltwise_layer ( UnaryEltwiseLayerNode node)

Create a backend unary element-wise operation layer function.

Template Parameters
UnaryEltwiseFunctions - Backend unary element-wise function
TargetInfo - Target-specific information
Parameters
[in] node - Node to create the backend function for
Returns
Backend unary element-wise operation layer function

Definition at line 1005 of file FunctionHelpers.h.

References ARM_COMPUTE_ERROR, ARM_COMPUTE_ERROR_ON, ARM_COMPUTE_LOG_GRAPH_INFO, arm_compute::graph::Exp, and arm_compute::test::validation::input.

1006 {
1007  validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1008 
1009  // Extract IO and info
1010  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1011  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1012  const UnaryEltwiseOperation eltwise_op = node.eltwise_descriptor().op;
1013 
1014  ARM_COMPUTE_ERROR_ON(input == nullptr);
1015  ARM_COMPUTE_ERROR_ON(output == nullptr);
1016 
1017  std::unique_ptr<IFunction> func = nullptr;
1018  std::string func_name;
1019  if(eltwise_op == UnaryEltwiseOperation::Exp)
1020  {
1021  std::tie(func, func_name) = create_named_function<typename UnaryEltwiseFunctions::Exp>(
1022  std::string("Exp"),
1023  input, output);
1024  }
1025  else
1026  {
1027  ARM_COMPUTE_ERROR("Unsupported unary element-wise operation!");
1028  }
1029 
1030  // Log info
1031  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1032  << node.name()
1033  << " Type: " << node.type()
1034  << " Target: " << TargetInfo::TargetType
1035  << " Operation: " << func_name
1036  << " Data Type: " << input->info()->data_type()
1037  << " Shape: " << input->info()->tensor_shape()
1038  << std::endl);
1039 
1040  return std::move(func);
1041 }
#define ARM_COMPUTE_ERROR(msg)
Print the given message then throw an std::runtime_error.
Definition: Error.h:352
TensorType
Memory type.
Definition: Types.h:38
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
Definition: Error.h:466
#define ARM_COMPUTE_LOG_GRAPH_INFO(x)
Definition: Logger.h:54
UnaryEltwiseOperation
Supported Unary Element-wise operations.
Definition: Types.h:120

◆ get_backing_tensor()

TargetInfo::TensorType* arm_compute::graph::backends::detail::get_backing_tensor ( arm_compute::graph::Tensor tensor)

Returns backing tensor of a given tensor.

Template Parameters
TargetInfo - Target information
Parameters
[in] tensor - Tensor to extract the backing tensor from
Returns
Backing tensor if present else nullptr

Definition at line 61 of file FunctionHelpers.h.

References ARM_COMPUTE_ERROR_ON, Tensor::desc(), Tensor::handle(), and TensorDescriptor::target.

62 {
63  typename TargetInfo::TensorType *backing_tensor = nullptr;
64  if(tensor != nullptr)
65  {
66  ARM_COMPUTE_ERROR_ON(tensor->desc().target != TargetInfo::TargetType);
67  // Get backing tensor handle
68  ITensorHandle *tensor_handle = tensor->handle();
69  // Get backing tensor
70  backing_tensor = (tensor_handle != nullptr) ? arm_compute::utils::cast::polymorphic_cast<typename TargetInfo::TensorType *>(&tensor_handle->tensor()) : nullptr;
71  }
72 
73  return backing_tensor;
74 }
ITensorHandle * handle()
Backend tensor handle accessor.
Definition: Tensor.cpp:55
TensorType
Memory type.
Definition: Types.h:38
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
Definition: Error.h:466
TensorDescriptor & desc()
TensorInfo metadata accessor.
Definition: Tensor.cpp:40

◆ get_backing_tensor_info()

arm_compute::ITensorInfo* arm_compute::graph::backends::detail::get_backing_tensor_info ( arm_compute::graph::Tensor tensor)
inline

Returns backing tensor info of a given tensor.

Parameters
[in] tensor - Tensor to extract the backing tensor from
Returns
Backing tensor info if present else nullptr

Definition at line 50 of file ValidateHelpers.h.

References Tensor::handle(), ITensor::info(), and ITensorHandle::tensor().

Referenced by validate_arg_min_max_layer(), validate_bounding_box_transform_layer(), validate_channel_shuffle_layer(), validate_convolution_layer(), validate_depth_to_space_layer(), validate_depthwise_convolution_layer(), validate_dequantization_layer(), validate_detection_output_layer(), validate_detection_post_process_layer(), validate_eltwise_Layer(), validate_fused_convolution_with_post_op(), validate_generate_proposals_layer(), validate_l2_normalize_layer(), validate_normalize_planar_yuv_layer(), validate_pad_layer(), validate_permute_layer(), validate_prelu_layer(), validate_priorbox_layer(), validate_quantization_layer(), validate_reduction_operation_layer(), validate_reorg_layer(), validate_reshape_layer(), validate_roi_align_layer(), validate_slice_layer(), validate_strided_slice_layer(), and validate_unary_eltwise_layer().

51 {
52  return ((tensor == nullptr) || (tensor->handle() == nullptr)) ? nullptr : tensor->handle()->tensor().info();
53 }
ITensorHandle * handle()
Backend tensor handle accessor.
Definition: Tensor.cpp:55
virtual arm_compute::ITensor & tensor()=0
Backend tensor object accessor.
virtual ITensorInfo * info() const =0
Interface to be implemented by the child class to return the tensor's metadata.

◆ validate_arg_min_max_layer()

Status arm_compute::graph::backends::detail::validate_arg_min_max_layer ( ArgMinMaxLayerNode node)

Validates a ArgMinMax layer node.

Template Parameters
ArgMinMax - Layer function type
Parameters
[in] node - Node to validate
Returns
Status

Definition at line 64 of file ValidateHelpers.h.

References ARM_COMPUTE_LOG_GRAPH_VERBOSE, ARM_COMPUTE_RETURN_ERROR_ON, ArgMinMaxLayerNode::axis(), get_backing_tensor_info(), INode::id(), INode::input(), arm_compute::test::validation::input, INode::name(), INode::num_inputs(), INode::num_outputs(), INode::output(), ArgMinMaxLayerNode::reduction_operation(), and arm_compute::validate().

65 {
66  ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating ArgMinMaxLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
67  ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 1);
68  ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);
69 
70  // Extract IO and info
72  arm_compute::ITensorInfo *output = get_backing_tensor_info(node.output(0));
73 
74  // Validate function
75  return ArgMinMaxLayer::validate(input, node.axis(), output, node.reduction_operation());
76 }
arm_compute::ITensorInfo * get_backing_tensor_info(arm_compute::graph::Tensor *tensor)
Returns backing tensor info of a given tensor.
Store the tensor's metadata.
Definition: ITensorInfo.h:40
#define ARM_COMPUTE_RETURN_ERROR_ON(cond)
If the condition is true, an error is returned.
Definition: Error.h:296
#define ARM_COMPUTE_LOG_GRAPH_VERBOSE(x)
Definition: Logger.h:50
Status validate(const ITensorInfo *scores_in, const ITensorInfo *boxes_in, const ITensorInfo *batch_splits_in, const ITensorInfo *scores_out, const ITensorInfo *boxes_out, const ITensorInfo *classes, const ITensorInfo *batch_splits_out, const ITensorInfo *keeps, const ITensorInfo *keeps_size, const BoxNMSLimitInfo info)

◆ validate_bounding_box_transform_layer()

Status arm_compute::graph::backends::detail::validate_bounding_box_transform_layer ( BoundingBoxTransformLayerNode node)

Validates a Bounding Box Transform layer node.

Template Parameters
BoundingBoxTransformLayer - Bounding Box Transform layer function type
Parameters
[in] node - Node to validate
Returns
Status

Definition at line 87 of file ValidateHelpers.h.

References ARM_COMPUTE_LOG_GRAPH_VERBOSE, ARM_COMPUTE_RETURN_ERROR_ON, get_backing_tensor_info(), INode::id(), BoundingBoxTransformLayerNode::info(), INode::input(), arm_compute::test::validation::input, INode::name(), INode::num_inputs(), INode::num_outputs(), INode::output(), and arm_compute::validate().

88 {
89  ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating BoundingBoxTransformLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
90  ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 2);
91  ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);
92 
93  // Extract IO and info
95  arm_compute::ITensorInfo *deltas = get_backing_tensor_info(node.input(1));
96  arm_compute::ITensorInfo *output = get_backing_tensor_info(node.output(0));
97  const BoundingBoxTransformInfo bbox_info = node.info();
98 
99  return BoundingBoxTransformLayer::validate(input, output, deltas, bbox_info);
100 }
arm_compute::ITensorInfo * get_backing_tensor_info(arm_compute::graph::Tensor *tensor)
Returns backing tensor info of a given tensor.
Store the tensor's metadata.
Definition: ITensorInfo.h:40
#define ARM_COMPUTE_RETURN_ERROR_ON(cond)
If the condition is true, an error is returned.
Definition: Error.h:296
#define ARM_COMPUTE_LOG_GRAPH_VERBOSE(x)
Definition: Logger.h:50
Status validate(const ITensorInfo *scores_in, const ITensorInfo *boxes_in, const ITensorInfo *batch_splits_in, const ITensorInfo *scores_out, const ITensorInfo *boxes_out, const ITensorInfo *classes, const ITensorInfo *batch_splits_out, const ITensorInfo *keeps, const ITensorInfo *keeps_size, const BoxNMSLimitInfo info)

◆ validate_channel_shuffle_layer()

Status arm_compute::graph::backends::detail::validate_channel_shuffle_layer ( ChannelShuffleLayerNode node)

Validates a Channel Shuffle layer node.

Template Parameters
ChannelShuffleLayer - Channel Shuffle layer function type
Parameters
[in] node - Node to validate
Returns
Status

Definition at line 111 of file ValidateHelpers.h.

References ARM_COMPUTE_LOG_GRAPH_VERBOSE, ARM_COMPUTE_RETURN_ERROR_ON, get_backing_tensor_info(), INode::id(), INode::input(), arm_compute::test::validation::input, INode::name(), ChannelShuffleLayerNode::num_groups(), arm_compute::test::validation::num_groups, INode::num_inputs(), INode::num_outputs(), INode::output(), and arm_compute::validate().

112 {
113  ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating ChannelShuffle node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
114  ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 1);
115  ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);
116 
117  // Extract IO and info
119  arm_compute::ITensorInfo *output = get_backing_tensor_info(node.output(0));
120  const unsigned int num_groups = node.num_groups();
121 
122  return ChannelShuffleLayer::validate(input, output, num_groups);
123 }
arm_compute::ITensorInfo * get_backing_tensor_info(arm_compute::graph::Tensor *tensor)
Returns backing tensor info of a given tensor.
Store the tensor's metadata.
Definition: ITensorInfo.h:40
#define ARM_COMPUTE_RETURN_ERROR_ON(cond)
If the condition is true, an error is returned.
Definition: Error.h:296
const unsigned int num_groups
Definition: Im2Col.cpp:153
#define ARM_COMPUTE_LOG_GRAPH_VERBOSE(x)
Definition: Logger.h:50
Status validate(const ITensorInfo *scores_in, const ITensorInfo *boxes_in, const ITensorInfo *batch_splits_in, const ITensorInfo *scores_out, const ITensorInfo *boxes_out, const ITensorInfo *classes, const ITensorInfo *batch_splits_out, const ITensorInfo *keeps, const ITensorInfo *keeps_size, const BoxNMSLimitInfo info)

◆ validate_convolution_layer()

Status arm_compute::graph::backends::detail::validate_convolution_layer ( ConvolutionLayerNode node)

Validates a Convolution layer node.

Template Parameters
ConvolutionLayerDefault Convolution layer function type
DirectConvolutionLayerDirect Convolution layer function type
GEMMConvolutionLayerGEMM Convolution layer function type
WinogradConvolutionLayerWinograd Convolution layer function type
Parameters
[in]nodeNode to validate
Returns
Status

Definition at line 137 of file ValidateHelpers.h.

References ARM_COMPUTE_LOG_GRAPH_VERBOSE, ARM_COMPUTE_RETURN_ERROR_MSG, ARM_COMPUTE_RETURN_ERROR_ON, ARM_COMPUTE_RETURN_ERROR_ON_MSG, arm_compute::test::validation::conv_info, ConvolutionLayerNode::convolution_info(), ConvolutionLayerNode::convolution_method(), arm_compute::graph::Default, arm_compute::graph::Direct, arm_compute::graph::Enabled, ConvolutionLayerNode::fast_math_hint(), arm_compute::graph::GEMM, get_backing_tensor_info(), INode::id(), INode::input(), arm_compute::test::validation::input, arm_compute::is_data_type_quantized_asymmetric(), INode::name(), ConvolutionLayerNode::num_groups(), arm_compute::test::validation::num_groups, INode::num_inputs(), INode::num_outputs(), INode::output(), arm_compute::S32, ITensorInfo::set_data_type(), arm_compute::validate(), and arm_compute::graph::Winograd.

Referenced by CLNodeValidator::validate(), and NENodeValidator::validate().

138 {
139  ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating ConvolutionLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
140  ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 3);
141  ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);
142 
143  // Extract IO and info
145  arm_compute::ITensorInfo *weights = get_backing_tensor_info(node.input(1));
146  arm_compute::ITensorInfo *biases = get_backing_tensor_info(node.input(2));
147  arm_compute::ITensorInfo *output = get_backing_tensor_info(node.output(0));
148 
149  if(is_data_type_quantized_asymmetric(input->data_type()))
150  {
151  biases->set_data_type(DataType::S32);
152  }
153 
154  const PadStrideInfo conv_info = node.convolution_info();
155  const ConvolutionMethod conv_algorithm = node.convolution_method();
156  const bool fast_math = node.fast_math_hint() == FastMathHint::Enabled;
157  const unsigned int num_groups = node.num_groups();
158 
159  // Validate function
160  Status status{};
161  switch(conv_algorithm)
162  {
163  case ConvolutionMethod::Direct:
164  ARM_COMPUTE_RETURN_ERROR_ON_MSG(num_groups != 1, "DirectConvolutionLayer does not support grouping!");
165  status = DirectConvolutionLayer::validate(input, weights, biases, output, conv_info);
166  break;
168  status = GEMMConvolutionLayer::validate(input, weights, biases, output, conv_info,
169  WeightsInfo(), Size2D(1, 1), ActivationLayerInfo(), num_groups);
170  break;
171  case ConvolutionMethod::Winograd:
172  ARM_COMPUTE_RETURN_ERROR_ON_MSG(num_groups != 1, "WinogradConvolutionLayer does not support grouping!");
173  status = WinogradConvolutionLayer::validate(input, weights, biases, output, conv_info, ActivationLayerInfo(), fast_math);
174  break;
175  case ConvolutionMethod::Default:
176  status = ConvolutionLayer::validate(input, weights, biases, output, conv_info,
177  WeightsInfo(), Size2D(1, 1), ActivationLayerInfo(), fast_math, num_groups);
178  break;
179  default:
180  ARM_COMPUTE_RETURN_ERROR_MSG("Unsupported convolution method");
181  }
182 
183  return status;
184 }
arm_compute::ITensorInfo * get_backing_tensor_info(arm_compute::graph::Tensor *tensor)
Returns backing tensor info of a given tensor.
Store the tensor&#39;s metadata.
Definition: ITensorInfo.h:40
ConvolutionMethod
Available ConvolutionMethod.
Definition: Types.h:134
#define ARM_COMPUTE_RETURN_ERROR_ON(cond)
If the condition is true, an error is returned.
Definition: Error.h:296
const unsigned int num_groups
Definition: Im2Col.cpp:153
virtual ITensorInfo & set_data_type(DataType data_type)=0
Set the data type to the specified value.
bool is_data_type_quantized_asymmetric(DataType dt)
Check if a given data type is of asymmetric quantized type.
Definition: Utils.h:1003
GEMM CL kernel type.
Definition: CLTypes.h:85
#define ARM_COMPUTE_RETURN_ERROR_MSG(...)
An error is returned with the given description.
Definition: Error.h:194
#define ARM_COMPUTE_LOG_GRAPH_VERBOSE(x)
Definition: Logger.h:50
#define ARM_COMPUTE_RETURN_ERROR_ON_MSG(cond, msg)
If the condition is true, an error is returned.
Definition: Error.h:244
Status validate(const ITensorInfo *scores_in, const ITensorInfo *boxes_in, const ITensorInfo *batch_splits_in, const ITensorInfo *scores_out, const ITensorInfo *boxes_out, const ITensorInfo *classes, const ITensorInfo *batch_splits_out, const ITensorInfo *keeps, const ITensorInfo *keeps_size, const BoxNMSLimitInfo info)

◆ validate_depth_to_space_layer()

Status arm_compute::graph::backends::detail::validate_depth_to_space_layer ( DepthToSpaceLayerNode node)

Validates a depth to space layer node.

Template Parameters
DepthToSpaceLayerDepthToSpace layer type
Parameters
[in]nodeNode to validate
Returns
Status

Definition at line 270 of file ValidateHelpers.h.

References ARM_COMPUTE_LOG_GRAPH_VERBOSE, ARM_COMPUTE_RETURN_ERROR_ON, DepthToSpaceLayerNode::block_shape(), get_backing_tensor_info(), INode::id(), INode::input(), arm_compute::test::validation::input, INode::name(), INode::num_inputs(), INode::num_outputs(), INode::output(), and arm_compute::validate().

271 {
272  ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating DetectionOutputLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
273  ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 1);
274  ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);
275 
276  // Extract IO and info
278  arm_compute::ITensorInfo *output = get_backing_tensor_info(node.output(0));
279 
280  return DepthToSpaceLayer::validate(input, output, node.block_shape());
281 }
arm_compute::ITensorInfo * get_backing_tensor_info(arm_compute::graph::Tensor *tensor)
Returns backing tensor info of a given tensor.
Store the tensor&#39;s metadata.
Definition: ITensorInfo.h:40
#define ARM_COMPUTE_RETURN_ERROR_ON(cond)
If the condition is true, an error is returned.
Definition: Error.h:296
#define ARM_COMPUTE_LOG_GRAPH_VERBOSE(x)
Definition: Logger.h:50
Status validate(const ITensorInfo *scores_in, const ITensorInfo *boxes_in, const ITensorInfo *batch_splits_in, const ITensorInfo *scores_out, const ITensorInfo *boxes_out, const ITensorInfo *classes, const ITensorInfo *batch_splits_out, const ITensorInfo *keeps, const ITensorInfo *keeps_size, const BoxNMSLimitInfo info)

◆ validate_depthwise_convolution_layer()

Status arm_compute::graph::backends::detail::validate_depthwise_convolution_layer ( DepthwiseConvolutionLayerNode node)

Validates a Depthwise Convolution layer node.

Template Parameters
DepthwiseConvolutionLayerDefault Depthwise Convolution layer type
Parameters
[in]nodeNode to validate
Returns
Status

Definition at line 231 of file ValidateHelpers.h.

References ARM_COMPUTE_LOG_GRAPH_VERBOSE, ARM_COMPUTE_RETURN_ERROR_MSG, ARM_COMPUTE_RETURN_ERROR_ON, arm_compute::test::validation::conv_info, DepthwiseConvolutionLayerNode::convolution_info(), arm_compute::graph::Default, DepthwiseConvolutionLayerNode::depth_multiplier(), DepthwiseConvolutionLayerNode::depthwise_convolution_method(), get_backing_tensor_info(), INode::id(), INode::input(), arm_compute::test::validation::input, INode::name(), INode::num_inputs(), INode::num_outputs(), arm_compute::graph::Optimized3x3, INode::output(), and arm_compute::validate().

232 {
233  ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating DepthwiseConvolutionLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
234  ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 3);
235  ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);
236 
237  // Extract IO and info
240  arm_compute::ITensorInfo *biases = get_backing_tensor_info(node.input(2));
241  arm_compute::ITensorInfo *output = get_backing_tensor_info(node.output(0));
242 
243  const PadStrideInfo conv_info = node.convolution_info();
244  const DepthwiseConvolutionMethod dwc_algorithm = node.depthwise_convolution_method();
245  const int depth_multiplier = node.depth_multiplier();
246 
247  // Validate function
248  Status status{};
249  switch(dwc_algorithm)
250  {
251  case DepthwiseConvolutionMethod::Default:
252  case DepthwiseConvolutionMethod::Optimized3x3:
253  status = DepthwiseConvolutionLayer::validate(input, weights, biases, output, conv_info, depth_multiplier);
254  break;
255  default:
256  ARM_COMPUTE_RETURN_ERROR_MSG("Unsupported depthwise convolution method");
257  }
258 
259  return status;
260 }
arm_compute::ITensorInfo * get_backing_tensor_info(arm_compute::graph::Tensor *tensor)
Returns backing tensor info of a given tensor.
Store the tensor&#39;s metadata.
Definition: ITensorInfo.h:40
#define ARM_COMPUTE_RETURN_ERROR_ON(cond)
If the condition is true, an error is returned.
Definition: Error.h:296
DepthwiseConvolutionMethod
Supported Depthwise Convolution layer methods.
Definition: Types.h:135
#define ARM_COMPUTE_RETURN_ERROR_MSG(...)
An error is returned with the given description.
Definition: Error.h:194
#define ARM_COMPUTE_LOG_GRAPH_VERBOSE(x)
Definition: Logger.h:50
Status validate(const ITensorInfo *scores_in, const ITensorInfo *boxes_in, const ITensorInfo *batch_splits_in, const ITensorInfo *scores_out, const ITensorInfo *boxes_out, const ITensorInfo *classes, const ITensorInfo *batch_splits_out, const ITensorInfo *keeps, const ITensorInfo *keeps_size, const BoxNMSLimitInfo info)

◆ validate_dequantization_layer()

Status arm_compute::graph::backends::detail::validate_dequantization_layer ( DequantizationLayerNode node)

Validates a dequantize layer node.

Template Parameters
DequantizationLayerDequantize layer type
Parameters
[in]nodeNode to validate
Returns
Status

Definition at line 291 of file ValidateHelpers.h.

References ARM_COMPUTE_LOG_GRAPH_VERBOSE, ARM_COMPUTE_RETURN_ERROR_ON, get_backing_tensor_info(), INode::id(), INode::input(), arm_compute::test::validation::input, INode::name(), INode::num_inputs(), INode::num_outputs(), INode::output(), and arm_compute::validate().

292 {
293  ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating DetectionOutputLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
294  ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 1);
295  ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);
296 
297  // Extract IO and info
299  arm_compute::ITensorInfo *output = get_backing_tensor_info(node.output(0));
300 
301  return DequantizationLayer::validate(input, output);
302 }
arm_compute::ITensorInfo * get_backing_tensor_info(arm_compute::graph::Tensor *tensor)
Returns backing tensor info of a given tensor.
Store the tensor&#39;s metadata.
Definition: ITensorInfo.h:40
#define ARM_COMPUTE_RETURN_ERROR_ON(cond)
If the condition is true, an error is returned.
Definition: Error.h:296
#define ARM_COMPUTE_LOG_GRAPH_VERBOSE(x)
Definition: Logger.h:50
Status validate(const ITensorInfo *scores_in, const ITensorInfo *boxes_in, const ITensorInfo *batch_splits_in, const ITensorInfo *scores_out, const ITensorInfo *boxes_out, const ITensorInfo *classes, const ITensorInfo *batch_splits_out, const ITensorInfo *keeps, const ITensorInfo *keeps_size, const BoxNMSLimitInfo info)

◆ validate_detection_output_layer()

Status arm_compute::graph::backends::detail::validate_detection_output_layer ( DetectionOutputLayerNode node)

Validates a detection output layer node.

Template Parameters
DetectionOutputLayerDetectionOutput layer type
Parameters
[in]nodeNode to validate
Returns
Status

Definition at line 312 of file ValidateHelpers.h.

References ARM_COMPUTE_LOG_GRAPH_VERBOSE, ARM_COMPUTE_RETURN_ERROR_ON, DetectionOutputLayerNode::detection_output_info(), get_backing_tensor_info(), INode::id(), INode::input(), INode::name(), INode::num_inputs(), INode::num_outputs(), INode::output(), and arm_compute::validate().

313 {
314  ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating DetectionOutputLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
315  ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 3);
316  ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);
317 
318  // Extract IO and info
319  arm_compute::ITensorInfo *input0 = get_backing_tensor_info(node.input(0));
320  arm_compute::ITensorInfo *input1 = get_backing_tensor_info(node.input(1));
321  arm_compute::ITensorInfo *input2 = get_backing_tensor_info(node.input(2));
322  arm_compute::ITensorInfo *output = get_backing_tensor_info(node.output(0));
323  const DetectionOutputLayerInfo detect_info = node.detection_output_info();
324 
325  return DetectionOutputLayer::validate(input0, input1, input2, output, detect_info);
326 }
arm_compute::ITensorInfo * get_backing_tensor_info(arm_compute::graph::Tensor *tensor)
Returns backing tensor info of a given tensor.
Store the tensor&#39;s metadata.
Definition: ITensorInfo.h:40
#define ARM_COMPUTE_RETURN_ERROR_ON(cond)
If the condition is true, an error is returned.
Definition: Error.h:296
#define ARM_COMPUTE_LOG_GRAPH_VERBOSE(x)
Definition: Logger.h:50
Status validate(const ITensorInfo *scores_in, const ITensorInfo *boxes_in, const ITensorInfo *batch_splits_in, const ITensorInfo *scores_out, const ITensorInfo *boxes_out, const ITensorInfo *classes, const ITensorInfo *batch_splits_out, const ITensorInfo *keeps, const ITensorInfo *keeps_size, const BoxNMSLimitInfo info)

◆ validate_detection_post_process_layer()

Status arm_compute::graph::backends::detail::validate_detection_post_process_layer ( DetectionPostProcessLayerNode node)

Validates a detection post process layer node.

Template Parameters
DetectionPostProcessLayerDetectionPostProcess layer type
Parameters
[in]nodeNode to validate
Returns
Status

Definition at line 336 of file ValidateHelpers.h.

References ARM_COMPUTE_LOG_GRAPH_VERBOSE, ARM_COMPUTE_RETURN_ERROR_ON, DetectionPostProcessLayerNode::detection_post_process_info(), get_backing_tensor_info(), INode::id(), INode::input(), INode::name(), INode::num_inputs(), INode::num_outputs(), INode::output(), and arm_compute::validate().

337 {
338  ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating DetectionPostProcessLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
339  ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 3);
340  ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 4);
341 
342  // Extract IO and info
343  arm_compute::ITensorInfo *input0 = get_backing_tensor_info(node.input(0));
344  arm_compute::ITensorInfo *input1 = get_backing_tensor_info(node.input(1));
345  arm_compute::ITensorInfo *input2 = get_backing_tensor_info(node.input(2));
346  arm_compute::ITensorInfo *output0 = get_backing_tensor_info(node.output(0));
347  arm_compute::ITensorInfo *output1 = get_backing_tensor_info(node.output(1));
348  arm_compute::ITensorInfo *output2 = get_backing_tensor_info(node.output(2));
349  arm_compute::ITensorInfo *output3 = get_backing_tensor_info(node.output(3));
350  const DetectionPostProcessLayerInfo detect_info = node.detection_post_process_info();
351 
352  return DetectionPostProcessLayer::validate(input0, input1, input2, output0, output1, output2, output3, detect_info);
353 }
arm_compute::ITensorInfo * get_backing_tensor_info(arm_compute::graph::Tensor *tensor)
Returns backing tensor info of a given tensor.
Store the tensor&#39;s metadata.
Definition: ITensorInfo.h:40
#define ARM_COMPUTE_RETURN_ERROR_ON(cond)
If the condition is true, an error is returned.
Definition: Error.h:296
#define ARM_COMPUTE_LOG_GRAPH_VERBOSE(x)
Definition: Logger.h:50
Status validate(const ITensorInfo *scores_in, const ITensorInfo *boxes_in, const ITensorInfo *batch_splits_in, const ITensorInfo *scores_out, const ITensorInfo *boxes_out, const ITensorInfo *classes, const ITensorInfo *batch_splits_out, const ITensorInfo *keeps, const ITensorInfo *keeps_size, const BoxNMSLimitInfo info)

◆ validate_eltwise_Layer()

Status arm_compute::graph::backends::detail::validate_eltwise_Layer ( EltwiseLayerNode node)

Validates an element-wise layer node.

Parameters
[in]nodeNode to validate
Returns
Status

Definition at line 700 of file ValidateHelpers.h.

References arm_compute::graph::Add, ARM_COMPUTE_ERROR, ARM_COMPUTE_LOG_GRAPH_VERBOSE, ARM_COMPUTE_RETURN_ERROR_ON, EltwiseLayerNode::convert_policy(), arm_compute::graph::Div, EltwiseLayerNode::eltwise_operation(), EltwiseLayerNode::fused_activation(), get_backing_tensor_info(), INode::id(), INode::input(), arm_compute::graph::Max, arm_compute::graph::Mul, INode::name(), INode::num_inputs(), INode::num_outputs(), INode::output(), EltwiseLayerNode::output_quant_info(), EltwiseLayerNode::rounding_policy(), arm_compute::graph::Sub, and arm_compute::validate().

701 {
702  ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating EltwiseLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
703  ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 2);
704  ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);
705 
706  // Extract input and output
707  const arm_compute::ITensorInfo *input1 = detail::get_backing_tensor_info(node.input(0));
708  const arm_compute::ITensorInfo *input2 = detail::get_backing_tensor_info(node.input(1));
709  const arm_compute::ITensorInfo *output = get_backing_tensor_info(node.output(0));
710  const EltwiseOperation eltwise_op = node.eltwise_operation();
711  const ConvertPolicy convert_policy = node.convert_policy();
712  const RoundingPolicy round_policy = node.rounding_policy();
713  const ActivationLayerInfo act_info = node.fused_activation();
714  const QuantizationInfo quant_info = node.output_quant_info();
715 
716  // Validate function
717  if(eltwise_op == EltwiseOperation::Add)
718  {
719  return EltwiseLayerFunctions::ArithmeticAddition::validate(input1, input2, output, convert_policy, act_info);
720  }
721  else if(eltwise_op == EltwiseOperation::Sub)
722  {
723  return EltwiseLayerFunctions::ArithmeticSubtraction::validate(input1, input2, output, convert_policy, act_info);
724  }
725  else if(eltwise_op == EltwiseOperation::Mul)
726  {
727  return EltwiseLayerFunctions::PixelWiseMultiplication::validate(input1, input2, output, 1.0f, convert_policy, round_policy, act_info);
728  }
729  else if(eltwise_op == EltwiseOperation::Max)
730  {
731  return EltwiseLayerFunctions::ElementwiseMax::validate(input1, input2, output, act_info);
732  }
733  else if(eltwise_op == EltwiseOperation::Div)
734  {
735  return EltwiseLayerFunctions::ArithmeticDivision::validate(input1, input2, output, act_info);
736  }
737  else
738  {
739  ARM_COMPUTE_ERROR("Unsupported element-wise operation!");
740  }
741  return Status{};
742 }
EltwiseOperation
Supported Element-wise operations.
Definition: Types.h:109
arm_compute::ITensorInfo * get_backing_tensor_info(arm_compute::graph::Tensor *tensor)
Returns backing tensor info of a given tensor.
#define ARM_COMPUTE_ERROR(msg)
Print the given message then throw an std::runtime_error.
Definition: Error.h:352
Store the tensor&#39;s metadata.
Definition: ITensorInfo.h:40
#define ARM_COMPUTE_RETURN_ERROR_ON(cond)
If the condition is true, an error is returned.
Definition: Error.h:296
RoundingPolicy
Rounding method.
Definition: Rounding.h:30
#define ARM_COMPUTE_LOG_GRAPH_VERBOSE(x)
Definition: Logger.h:50
ConvertPolicy
Policy to handle integer overflow.
Definition: Types.h:391
Status validate(const ITensorInfo *scores_in, const ITensorInfo *boxes_in, const ITensorInfo *batch_splits_in, const ITensorInfo *scores_out, const ITensorInfo *boxes_out, const ITensorInfo *classes, const ITensorInfo *batch_splits_out, const ITensorInfo *keeps, const ITensorInfo *keeps_size, const BoxNMSLimitInfo info)

◆ validate_fused_convolution_with_post_op()

Status arm_compute::graph::backends::detail::validate_fused_convolution_with_post_op ( FusedConvolutionWithPostOpNode node)

Validates a fused Convolution layer node with post-operations.

Template Parameters
GEMMConvolutionLayerGEMM Convolution layer function type
Parameters
[in]nodeNode to validate
Returns
Status

Definition at line 195 of file ValidateHelpers.h.

References ARM_COMPUTE_LOG_GRAPH_VERBOSE, ARM_COMPUTE_RETURN_ERROR_ON, arm_compute::test::validation::conv_info, FusedConvolutionWithPostOpNode::convolution_info(), get_backing_tensor_info(), INode::id(), INode::input(), arm_compute::test::validation::input, arm_compute::is_data_type_quantized_asymmetric(), INode::name(), FusedConvolutionWithPostOpNode::num_groups(), arm_compute::test::validation::num_groups, INode::num_inputs(), INode::num_outputs(), INode::output(), arm_compute::S32, ITensorInfo::set_data_type(), and arm_compute::validate().

196 {
197  ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating fused ConvolutionLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
198  ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 4);
199  ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);
200 
201  // Extract IO and info
203  arm_compute::ITensorInfo *weights = get_backing_tensor_info(node.input(1));
204  arm_compute::ITensorInfo *biases = get_backing_tensor_info(node.input(2));
205  arm_compute::ITensorInfo *output = get_backing_tensor_info(node.output(0));
206 
207  if(is_data_type_quantized_asymmetric(input->data_type()))
208  {
209  biases->set_data_type(DataType::S32);
210  }
211 
212  const PadStrideInfo conv_info = node.convolution_info();
213  //const ConvolutionMethod conv_algorithm = node.convolution_method();
214  //const bool fast_math = node.fast_math_hint() == FastMathHint::Enabled;
215  const unsigned int num_groups = node.num_groups();
216 
217  // Validate function
218  return GEMMConvolutionLayer::validate(input, weights, biases, output, conv_info,
219  WeightsInfo(), Size2D(1, 1), ActivationLayerInfo(), num_groups);
220 }
arm_compute::ITensorInfo * get_backing_tensor_info(arm_compute::graph::Tensor *tensor)
Returns backing tensor info of a given tensor.
Store the tensor&#39;s metadata.
Definition: ITensorInfo.h:40
#define ARM_COMPUTE_RETURN_ERROR_ON(cond)
If the condition is true, an error is returned.
Definition: Error.h:296
const unsigned int num_groups
Definition: Im2Col.cpp:153
virtual ITensorInfo & set_data_type(DataType data_type)=0
Set the data type to the specified value.
bool is_data_type_quantized_asymmetric(DataType dt)
Check if a given data type is of asymmetric quantized type.
Definition: Utils.h:1003
#define ARM_COMPUTE_LOG_GRAPH_VERBOSE(x)
Definition: Logger.h:50
Status validate(const ITensorInfo *scores_in, const ITensorInfo *boxes_in, const ITensorInfo *batch_splits_in, const ITensorInfo *scores_out, const ITensorInfo *boxes_out, const ITensorInfo *classes, const ITensorInfo *batch_splits_out, const ITensorInfo *keeps, const ITensorInfo *keeps_size, const BoxNMSLimitInfo info)

◆ validate_generate_proposals_layer()

Status arm_compute::graph::backends::detail::validate_generate_proposals_layer ( GenerateProposalsLayerNode node)

Validates a Generate Proposals layer node.

Template Parameters
GenerateProposalsLayerGenerate Proposals layer type
Parameters
[in]nodeNode to validate
Returns
Status

Definition at line 364 of file ValidateHelpers.h.

References ARM_COMPUTE_LOG_GRAPH_VERBOSE, ARM_COMPUTE_RETURN_ERROR_ON, get_backing_tensor_info(), INode::id(), GenerateProposalsLayerNode::info(), arm_compute::test::validation::info, INode::input(), INode::name(), INode::num_inputs(), INode::num_outputs(), INode::output(), and arm_compute::validate().

365 {
366  ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating GenerateProposalsLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
367  ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 3);
368  ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 3);
369 
370  // Extract IO and info
374  arm_compute::ITensorInfo *proposals = get_backing_tensor_info(node.output(0));
375  arm_compute::ITensorInfo *scores_out = get_backing_tensor_info(node.output(1));
376  arm_compute::ITensorInfo *num_valid_proposals = get_backing_tensor_info(node.output(2));
377  const GenerateProposalsInfo info = node.info();
378 
379  return GenerateProposalsLayer::validate(scores, deltas, anchors, proposals, scores_out, num_valid_proposals, info);
380 }
arm_compute::ITensorInfo * get_backing_tensor_info(arm_compute::graph::Tensor *tensor)
Returns backing tensor info of a given tensor.
Store the tensor&#39;s metadata.
Definition: ITensorInfo.h:40
#define ARM_COMPUTE_RETURN_ERROR_ON(cond)
If the condition is true, an error is returned.
Definition: Error.h:296
ScaleKernelInfo info(interpolation_policy, default_border_mode, PixelValue(), sampling_policy, false)
#define ARM_COMPUTE_LOG_GRAPH_VERBOSE(x)
Definition: Logger.h:50
Status validate(const ITensorInfo *scores_in, const ITensorInfo *boxes_in, const ITensorInfo *batch_splits_in, const ITensorInfo *scores_out, const ITensorInfo *boxes_out, const ITensorInfo *classes, const ITensorInfo *batch_splits_out, const ITensorInfo *keeps, const ITensorInfo *keeps_size, const BoxNMSLimitInfo info)

◆ validate_l2_normalize_layer()

Status arm_compute::graph::backends::detail::validate_l2_normalize_layer ( L2NormalizeLayerNode node)

Validates a L2Normalization layer node.

Template Parameters
L2Normalizationlayer type
Parameters
[in]nodeNode to validate
Returns
Status

Definition at line 391 of file ValidateHelpers.h.

References ARM_COMPUTE_LOG_GRAPH_VERBOSE, ARM_COMPUTE_RETURN_ERROR_ON, L2NormalizeLayerNode::axis(), arm_compute::quantization::epsilon, L2NormalizeLayerNode::epsilon(), get_backing_tensor_info(), INode::id(), INode::input(), arm_compute::test::validation::input, INode::name(), INode::num_inputs(), INode::num_outputs(), INode::output(), and arm_compute::validate().

392 {
393  ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating L2NormalizeLayerNode node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
394  ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 1);
395  ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);
396 
397  // Extract IO and info
399  arm_compute::ITensorInfo *output = get_backing_tensor_info(node.output(0));
400  int axis = node.axis();
401  float epsilon = node.epsilon();
402 
403  // Validate function
404  return L2NormalizeLayer::validate(input, output, axis, epsilon);
405 }
arm_compute::ITensorInfo * get_backing_tensor_info(arm_compute::graph::Tensor *tensor)
Returns backing tensor info of a given tensor.
Store the tensor&#39;s metadata.
Definition: ITensorInfo.h:40
#define ARM_COMPUTE_RETURN_ERROR_ON(cond)
If the condition is true, an error is returned.
Definition: Error.h:296
#define ARM_COMPUTE_LOG_GRAPH_VERBOSE(x)
Definition: Logger.h:50
Status validate(const ITensorInfo *scores_in, const ITensorInfo *boxes_in, const ITensorInfo *batch_splits_in, const ITensorInfo *scores_out, const ITensorInfo *boxes_out, const ITensorInfo *classes, const ITensorInfo *batch_splits_out, const ITensorInfo *keeps, const ITensorInfo *keeps_size, const BoxNMSLimitInfo info)

◆ validate_node()

void arm_compute::graph::backends::detail::validate_node ( const INode node,
size_t  num_expected_inputs,
size_t  num_expected_outputs 
)

Definition at line 77 of file FunctionHelpers.h.

References ARM_COMPUTE_ERROR_ON, ARM_COMPUTE_LOG_GRAPH_VERBOSE, ARM_COMPUTE_UNUSED, INode::assigned_target(), INode::id(), INode::name(), INode::num_inputs(), INode::num_outputs(), and INode::type().

78 {
79  ARM_COMPUTE_LOG_GRAPH_VERBOSE("Creating " << node.type()
80  << " Target: " << TargetInfo::TargetType
81  << " ID: " << node.id()
82  << node.name()
83  << std::endl);
84 
85  ARM_COMPUTE_ERROR_ON(TargetInfo::TargetType != node.assigned_target());
86  ARM_COMPUTE_ERROR_ON(node.num_inputs() != num_expected_inputs);
87  ARM_COMPUTE_ERROR_ON(node.num_outputs() != num_expected_outputs);
88  ARM_COMPUTE_UNUSED(node, num_expected_inputs, num_expected_outputs);
89 }
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
Definition: Error.h:466
#define ARM_COMPUTE_UNUSED(...)
To avoid unused variables warnings.
Definition: Error.h:152
#define ARM_COMPUTE_LOG_GRAPH_VERBOSE(x)
Definition: Logger.h:50

◆ validate_normalize_planar_yuv_layer()

Status arm_compute::graph::backends::detail::validate_normalize_planar_yuv_layer ( NormalizePlanarYUVLayerNode node)

Validates a NormalizePlanarYUV layer node.

Template Parameters
NormalizePlanarYUVLayerlayer type
Parameters
[in]nodeNode to validate
Returns
Status

Definition at line 416 of file ValidateHelpers.h.

References ARM_COMPUTE_LOG_GRAPH_VERBOSE, ARM_COMPUTE_RETURN_ERROR_ON, get_backing_tensor_info(), INode::id(), INode::input(), arm_compute::test::validation::input, INode::name(), INode::num_inputs(), INode::num_outputs(), INode::output(), and arm_compute::validate().

417 {
418  ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating NormalizePlanarYUVLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
419  ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 3);
420  ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);
421 
422  // Extract IO and info
426  arm_compute::ITensorInfo *output = get_backing_tensor_info(node.output(0));
427 
428  // Validate function
429  return NormalizePlanarYUVLayer::validate(input, output, mean, std);
430 }
arm_compute::ITensorInfo * get_backing_tensor_info(arm_compute::graph::Tensor *tensor)
Returns backing tensor info of a given tensor.
Store the tensor&#39;s metadata.
Definition: ITensorInfo.h:40
#define ARM_COMPUTE_RETURN_ERROR_ON(cond)
If the condition is true, an error is returned.
Definition: Error.h:296
#define ARM_COMPUTE_LOG_GRAPH_VERBOSE(x)
Definition: Logger.h:50
Status validate(const ITensorInfo *scores_in, const ITensorInfo *boxes_in, const ITensorInfo *batch_splits_in, const ITensorInfo *scores_out, const ITensorInfo *boxes_out, const ITensorInfo *classes, const ITensorInfo *batch_splits_out, const ITensorInfo *keeps, const ITensorInfo *keeps_size, const BoxNMSLimitInfo info)

◆ validate_pad_layer()

Status arm_compute::graph::backends::detail::validate_pad_layer ( PadLayerNode node)

Validates a pad layer node.

Template Parameters
PadLayerPad layer type
Parameters
[in]nodeNode to validate
Returns
Status

Definition at line 441 of file ValidateHelpers.h.

References ARM_COMPUTE_LOG_GRAPH_VERBOSE, ARM_COMPUTE_RETURN_ERROR_ON, get_backing_tensor_info(), INode::id(), INode::input(), arm_compute::test::validation::input, INode::name(), INode::num_inputs(), INode::num_outputs(), INode::output(), PadLayerNode::padding(), and arm_compute::validate().

442 {
443  ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating PadLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
444  ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 1);
445  ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);
446 
447  // Extract IO and info
449  arm_compute::ITensorInfo *output = get_backing_tensor_info(node.output(0));
450  const PaddingList &padding = node.padding();
451 
452  return PadLayer::validate(input, output, padding);
453 }
std::vector< PaddingInfo > PaddingList
List of padding information.
Definition: Types.h:440
arm_compute::ITensorInfo * get_backing_tensor_info(arm_compute::graph::Tensor *tensor)
Returns backing tensor info of a given tensor.
Store the tensor&#39;s metadata.
Definition: ITensorInfo.h:40
#define ARM_COMPUTE_RETURN_ERROR_ON(cond)
If the condition is true, an error is returned.
Definition: Error.h:296
#define ARM_COMPUTE_LOG_GRAPH_VERBOSE(x)
Definition: Logger.h:50
Status validate(const ITensorInfo *scores_in, const ITensorInfo *boxes_in, const ITensorInfo *batch_splits_in, const ITensorInfo *scores_out, const ITensorInfo *boxes_out, const ITensorInfo *classes, const ITensorInfo *batch_splits_out, const ITensorInfo *keeps, const ITensorInfo *keeps_size, const BoxNMSLimitInfo info)

◆ validate_permute_layer()

Status arm_compute::graph::backends::detail::validate_permute_layer ( PermuteLayerNode node)

Validates a permute layer node.

Template Parameters
PermuteLayer — Permute layer type
Parameters
[in] node — Node to validate
Returns
Status

Definition at line 464 of file ValidateHelpers.h.

References ARM_COMPUTE_LOG_GRAPH_VERBOSE, ARM_COMPUTE_RETURN_ERROR_ON, get_backing_tensor_info(), INode::id(), INode::input(), arm_compute::test::validation::input, INode::name(), INode::num_inputs(), INode::num_outputs(), INode::output(), PermuteLayerNode::permutation_vector(), and arm_compute::validate().

465 {
466  ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating PermuteLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
467  ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 1);
468  ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);
469 
470  // Extract IO and info
471  arm_compute::ITensorInfo *input  = get_backing_tensor_info(node.input(0));
472  arm_compute::ITensorInfo *output = get_backing_tensor_info(node.output(0));
473  const PermutationVector &perm = node.permutation_vector();
474 
475  return PermuteLayer::validate(input, output, perm);
476 }
arm_compute::ITensorInfo * get_backing_tensor_info(arm_compute::graph::Tensor *tensor)
Returns backing tensor info of a given tensor.
Strides PermutationVector
Permutation vector.
Definition: Types.h:51
Store the tensor's metadata.
Definition: ITensorInfo.h:40
#define ARM_COMPUTE_RETURN_ERROR_ON(cond)
If the condition is true, an error is returned.
Definition: Error.h:296
#define ARM_COMPUTE_LOG_GRAPH_VERBOSE(x)
Definition: Logger.h:50
Status validate(const ITensorInfo *scores_in, const ITensorInfo *boxes_in, const ITensorInfo *batch_splits_in, const ITensorInfo *scores_out, const ITensorInfo *boxes_out, const ITensorInfo *classes, const ITensorInfo *batch_splits_out, const ITensorInfo *keeps, const ITensorInfo *keeps_size, const BoxNMSLimitInfo info)

◆ validate_prelu_layer()

Status arm_compute::graph::backends::detail::validate_prelu_layer ( PReluLayerNode node)

Validates a PRelu layer node.

Template Parameters
PReluLayer — PRelu layer type
Parameters
[in] node — Node to validate
Returns
Status

Definition at line 487 of file ValidateHelpers.h.

References ARM_COMPUTE_LOG_GRAPH_VERBOSE, ARM_COMPUTE_RETURN_ERROR_ON, get_backing_tensor_info(), INode::id(), INode::input(), arm_compute::test::validation::input, INode::name(), INode::num_inputs(), INode::num_outputs(), INode::output(), and arm_compute::validate().

488 {
489  ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating PRelu node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
490  ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 2);
491  ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);
492 
493  // Extract IO and info
494  arm_compute::ITensorInfo *input  = get_backing_tensor_info(node.input(0));
495  arm_compute::ITensorInfo *alpha  = get_backing_tensor_info(node.input(1));
496  arm_compute::ITensorInfo *output = get_backing_tensor_info(node.output(0));
497 
498  return PReluLayer::validate(input, alpha, output);
499 }
arm_compute::ITensorInfo * get_backing_tensor_info(arm_compute::graph::Tensor *tensor)
Returns backing tensor info of a given tensor.
Store the tensor's metadata.
Definition: ITensorInfo.h:40
#define ARM_COMPUTE_RETURN_ERROR_ON(cond)
If the condition is true, an error is returned.
Definition: Error.h:296
#define ARM_COMPUTE_LOG_GRAPH_VERBOSE(x)
Definition: Logger.h:50
Status validate(const ITensorInfo *scores_in, const ITensorInfo *boxes_in, const ITensorInfo *batch_splits_in, const ITensorInfo *scores_out, const ITensorInfo *boxes_out, const ITensorInfo *classes, const ITensorInfo *batch_splits_out, const ITensorInfo *keeps, const ITensorInfo *keeps_size, const BoxNMSLimitInfo info)

◆ validate_priorbox_layer()

Status arm_compute::graph::backends::detail::validate_priorbox_layer ( PriorBoxLayerNode node)

Validates a priorbox layer node.

Template Parameters
PriorBoxLayer — PriorBox layer type
Parameters
[in] node — Node to validate
Returns
Status

Definition at line 510 of file ValidateHelpers.h.

References ARM_COMPUTE_LOG_GRAPH_VERBOSE, ARM_COMPUTE_RETURN_ERROR_ON, get_backing_tensor_info(), INode::id(), INode::input(), INode::name(), INode::num_inputs(), INode::num_outputs(), INode::output(), PriorBoxLayerNode::priorbox_info(), and arm_compute::validate().

511 {
512  ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating PriorBoxLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
513  ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 2);
514  ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);
515 
516  // Extract IO and info
517  arm_compute::ITensorInfo *input0 = get_backing_tensor_info(node.input(0));
518  arm_compute::ITensorInfo *input1 = get_backing_tensor_info(node.input(1));
519  arm_compute::ITensorInfo *output = get_backing_tensor_info(node.output(0));
520  const PriorBoxLayerInfo prior_info = node.priorbox_info();
521 
522  return PriorBoxLayer::validate(input0, input1, output, prior_info);
523 }
arm_compute::ITensorInfo * get_backing_tensor_info(arm_compute::graph::Tensor *tensor)
Returns backing tensor info of a given tensor.
Store the tensor's metadata.
Definition: ITensorInfo.h:40
#define ARM_COMPUTE_RETURN_ERROR_ON(cond)
If the condition is true, an error is returned.
Definition: Error.h:296
#define ARM_COMPUTE_LOG_GRAPH_VERBOSE(x)
Definition: Logger.h:50
Status validate(const ITensorInfo *scores_in, const ITensorInfo *boxes_in, const ITensorInfo *batch_splits_in, const ITensorInfo *scores_out, const ITensorInfo *boxes_out, const ITensorInfo *classes, const ITensorInfo *batch_splits_out, const ITensorInfo *keeps, const ITensorInfo *keeps_size, const BoxNMSLimitInfo info)

◆ validate_quantization_layer()

Status arm_compute::graph::backends::detail::validate_quantization_layer ( QuantizationLayerNode node)

Validates a Quantization layer node.

Template Parameters
QuantizationLayer — Quantization layer type
Parameters
[in] node — Node to validate
Returns
Status

Definition at line 534 of file ValidateHelpers.h.

References ARM_COMPUTE_LOG_GRAPH_VERBOSE, ARM_COMPUTE_RETURN_ERROR_ON, get_backing_tensor_info(), INode::id(), INode::input(), arm_compute::test::validation::input, INode::name(), INode::num_inputs(), INode::num_outputs(), INode::output(), and arm_compute::validate().

535 {
536  ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating QuantizationLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
537  ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 1);
538  ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);
539 
540  // Extract input and output
541  arm_compute::ITensorInfo *input  = get_backing_tensor_info(node.input(0));
542  arm_compute::ITensorInfo *output = get_backing_tensor_info(node.output(0));
543 
544  // Validate function
545  return QuantizationLayer::validate(input, output);
546 }
arm_compute::ITensorInfo * get_backing_tensor_info(arm_compute::graph::Tensor *tensor)
Returns backing tensor info of a given tensor.
Store the tensor's metadata.
Definition: ITensorInfo.h:40
#define ARM_COMPUTE_RETURN_ERROR_ON(cond)
If the condition is true, an error is returned.
Definition: Error.h:296
#define ARM_COMPUTE_LOG_GRAPH_VERBOSE(x)
Definition: Logger.h:50
Status validate(const ITensorInfo *scores_in, const ITensorInfo *boxes_in, const ITensorInfo *batch_splits_in, const ITensorInfo *scores_out, const ITensorInfo *boxes_out, const ITensorInfo *classes, const ITensorInfo *batch_splits_out, const ITensorInfo *keeps, const ITensorInfo *keeps_size, const BoxNMSLimitInfo info)

◆ validate_reduction_operation_layer()

Status arm_compute::graph::backends::detail::validate_reduction_operation_layer ( ReductionLayerNode node)

Validates a Reduction operation layer node.

Template Parameters
ReductionLayer — Reduction layer type
Parameters
[in] node — Node to validate
Returns
Status

Definition at line 557 of file ValidateHelpers.h.

References ARM_COMPUTE_LOG_GRAPH_VERBOSE, ARM_COMPUTE_RETURN_ERROR_ON, ReductionLayerNode::axis(), get_backing_tensor_info(), INode::id(), INode::input(), arm_compute::test::validation::input, ReductionLayerNode::keep_dims(), INode::name(), INode::num_inputs(), INode::num_outputs(), ReductionLayerNode::op(), INode::output(), and arm_compute::validate().

558 {
559  ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating ReductionLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
560 
561  ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 1);
562  ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);
563 
564  // Extract input and output
565  arm_compute::ITensorInfo *input  = get_backing_tensor_info(node.input(0));
566  arm_compute::ITensorInfo *output = get_backing_tensor_info(node.output(0));
567 
568  // Validate function
569  return ReductionLayer::validate(input, output, node.axis(), node.op(), node.keep_dims());
570 }
arm_compute::ITensorInfo * get_backing_tensor_info(arm_compute::graph::Tensor *tensor)
Returns backing tensor info of a given tensor.
Store the tensor's metadata.
Definition: ITensorInfo.h:40
#define ARM_COMPUTE_RETURN_ERROR_ON(cond)
If the condition is true, an error is returned.
Definition: Error.h:296
#define ARM_COMPUTE_LOG_GRAPH_VERBOSE(x)
Definition: Logger.h:50
Status validate(const ITensorInfo *scores_in, const ITensorInfo *boxes_in, const ITensorInfo *batch_splits_in, const ITensorInfo *scores_out, const ITensorInfo *boxes_out, const ITensorInfo *classes, const ITensorInfo *batch_splits_out, const ITensorInfo *keeps, const ITensorInfo *keeps_size, const BoxNMSLimitInfo info)

◆ validate_reorg_layer()

Status arm_compute::graph::backends::detail::validate_reorg_layer ( ReorgLayerNode node)

Validates a Reorg layer node.

Template Parameters
ReorgLayer — Reorg layer type
Parameters
[in] node — Node to validate
Returns
Status

Definition at line 581 of file ValidateHelpers.h.

References ARM_COMPUTE_LOG_GRAPH_VERBOSE, ARM_COMPUTE_RETURN_ERROR_ON, get_backing_tensor_info(), INode::id(), INode::input(), arm_compute::test::validation::input, INode::name(), INode::num_inputs(), INode::num_outputs(), INode::output(), ReorgLayerNode::stride(), and arm_compute::validate().

582 {
583  ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating ReorgLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
584  ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 1);
585  ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);
586 
587  // Extract input and output
588  arm_compute::ITensorInfo *input  = get_backing_tensor_info(node.input(0));
589  arm_compute::ITensorInfo *output = get_backing_tensor_info(node.output(0));
590 
591  // Validate function
592  return ReorgLayer::validate(input, output, node.stride());
593 }
arm_compute::ITensorInfo * get_backing_tensor_info(arm_compute::graph::Tensor *tensor)
Returns backing tensor info of a given tensor.
Store the tensor's metadata.
Definition: ITensorInfo.h:40
#define ARM_COMPUTE_RETURN_ERROR_ON(cond)
If the condition is true, an error is returned.
Definition: Error.h:296
#define ARM_COMPUTE_LOG_GRAPH_VERBOSE(x)
Definition: Logger.h:50
Status validate(const ITensorInfo *scores_in, const ITensorInfo *boxes_in, const ITensorInfo *batch_splits_in, const ITensorInfo *scores_out, const ITensorInfo *boxes_out, const ITensorInfo *classes, const ITensorInfo *batch_splits_out, const ITensorInfo *keeps, const ITensorInfo *keeps_size, const BoxNMSLimitInfo info)

◆ validate_reshape_layer()

Status arm_compute::graph::backends::detail::validate_reshape_layer ( ReshapeLayerNode node)

Validates a Reshape layer node.

Template Parameters
ReshapeLayer — Reshape layer type
Parameters
[in] node — Node to validate
Returns
Status

Definition at line 604 of file ValidateHelpers.h.

References ARM_COMPUTE_LOG_GRAPH_VERBOSE, ARM_COMPUTE_RETURN_ERROR_ON, get_backing_tensor_info(), INode::id(), INode::input(), arm_compute::test::validation::input, INode::name(), INode::num_inputs(), INode::num_outputs(), INode::output(), and arm_compute::validate().

605 {
606  ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating ReshapeLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
607  ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 1);
608  ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);
609 
610  // Extract input and output
611  arm_compute::ITensorInfo *input  = get_backing_tensor_info(node.input(0));
612  arm_compute::ITensorInfo *output = get_backing_tensor_info(node.output(0));
613 
614  // Validate function
615  return ReshapeLayer::validate(input, output);
616 }
arm_compute::ITensorInfo * get_backing_tensor_info(arm_compute::graph::Tensor *tensor)
Returns backing tensor info of a given tensor.
Store the tensor's metadata.
Definition: ITensorInfo.h:40
#define ARM_COMPUTE_RETURN_ERROR_ON(cond)
If the condition is true, an error is returned.
Definition: Error.h:296
#define ARM_COMPUTE_LOG_GRAPH_VERBOSE(x)
Definition: Logger.h:50
Status validate(const ITensorInfo *scores_in, const ITensorInfo *boxes_in, const ITensorInfo *batch_splits_in, const ITensorInfo *scores_out, const ITensorInfo *boxes_out, const ITensorInfo *classes, const ITensorInfo *batch_splits_out, const ITensorInfo *keeps, const ITensorInfo *keeps_size, const BoxNMSLimitInfo info)

◆ validate_roi_align_layer()

Status arm_compute::graph::backends::detail::validate_roi_align_layer ( ROIAlignLayerNode node)

Validates a ROI Align layer node.

Template Parameters
ROIAlignLayer — ROIAlign layer type
Parameters
[in] node — Node to validate
Returns
Status

Definition at line 627 of file ValidateHelpers.h.

References ARM_COMPUTE_LOG_GRAPH_VERBOSE, ARM_COMPUTE_RETURN_ERROR_ON, get_backing_tensor_info(), INode::id(), INode::input(), arm_compute::test::validation::input, INode::name(), INode::num_inputs(), INode::num_outputs(), INode::output(), ROIAlignLayerNode::pooling_info(), and arm_compute::validate().

628 {
629  ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating ROIAlignLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
630  ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 2);
631  ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);
632 
633  // Extract input and output
634  arm_compute::ITensorInfo *input  = get_backing_tensor_info(node.input(0));
635  arm_compute::ITensorInfo *rois   = get_backing_tensor_info(node.input(1));
636  arm_compute::ITensorInfo *output = get_backing_tensor_info(node.output(0));
637  const ROIPoolingLayerInfo &pool_info = node.pooling_info();
638 
639  // Validate function
640  return ROIAlignLayer::validate(input, rois, output, pool_info);
641 }
arm_compute::ITensorInfo * get_backing_tensor_info(arm_compute::graph::Tensor *tensor)
Returns backing tensor info of a given tensor.
Store the tensor's metadata.
Definition: ITensorInfo.h:40
#define ARM_COMPUTE_RETURN_ERROR_ON(cond)
If the condition is true, an error is returned.
Definition: Error.h:296
#define ARM_COMPUTE_LOG_GRAPH_VERBOSE(x)
Definition: Logger.h:50
Status validate(const ITensorInfo *scores_in, const ITensorInfo *boxes_in, const ITensorInfo *batch_splits_in, const ITensorInfo *scores_out, const ITensorInfo *boxes_out, const ITensorInfo *classes, const ITensorInfo *batch_splits_out, const ITensorInfo *keeps, const ITensorInfo *keeps_size, const BoxNMSLimitInfo info)

◆ validate_slice_layer()

Status arm_compute::graph::backends::detail::validate_slice_layer ( SliceLayerNode node)

Validates a Slice layer node.

Template Parameters
SliceLayer — Slice layer function type
Parameters
[in] node — Node to validate
Returns
Status

Definition at line 652 of file ValidateHelpers.h.

References ARM_COMPUTE_LOG_GRAPH_VERBOSE, ARM_COMPUTE_RETURN_ERROR_ON, SliceLayerNode::ends(), get_backing_tensor_info(), INode::id(), INode::input(), arm_compute::test::validation::input, INode::name(), INode::num_inputs(), INode::num_outputs(), INode::output(), SliceLayerNode::starts(), and arm_compute::validate().

653 {
654  ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating Slice node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
655  ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 1);
656  ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);
657 
658  // Extract IO and info
659  arm_compute::ITensorInfo *input  = get_backing_tensor_info(node.input(0));
660  arm_compute::ITensorInfo *output = get_backing_tensor_info(node.output(0));
661  const Coordinates starts = node.starts();
662  const Coordinates ends = node.ends();
663 
664  return SliceLayer::validate(input, output, starts, ends);
665 }
arm_compute::ITensorInfo * get_backing_tensor_info(arm_compute::graph::Tensor *tensor)
Returns backing tensor info of a given tensor.
Store the tensor's metadata.
Definition: ITensorInfo.h:40
#define ARM_COMPUTE_RETURN_ERROR_ON(cond)
If the condition is true, an error is returned.
Definition: Error.h:296
#define ARM_COMPUTE_LOG_GRAPH_VERBOSE(x)
Definition: Logger.h:50
Status validate(const ITensorInfo *scores_in, const ITensorInfo *boxes_in, const ITensorInfo *batch_splits_in, const ITensorInfo *scores_out, const ITensorInfo *boxes_out, const ITensorInfo *classes, const ITensorInfo *batch_splits_out, const ITensorInfo *keeps, const ITensorInfo *keeps_size, const BoxNMSLimitInfo info)

◆ validate_strided_slice_layer()

Status arm_compute::graph::backends::detail::validate_strided_slice_layer ( StridedSliceLayerNode node)

Validates a Strided Slice layer node.

Template Parameters
StridedSliceLayer — Strided Slice layer function type
Parameters
[in] node — Node to validate
Returns
Status

Definition at line 676 of file ValidateHelpers.h.

References ARM_COMPUTE_LOG_GRAPH_VERBOSE, ARM_COMPUTE_RETURN_ERROR_ON, StridedSliceLayerInfo::begin_mask(), StridedSliceLayerInfo::end_mask(), StridedSliceLayerNode::ends(), get_backing_tensor_info(), INode::id(), arm_compute::test::validation::info, INode::input(), arm_compute::test::validation::input, INode::name(), INode::num_inputs(), INode::num_outputs(), INode::output(), StridedSliceLayerInfo::shrink_axis_mask(), StridedSliceLayerNode::starts(), StridedSliceLayerNode::strided_slice_info(), StridedSliceLayerNode::strides(), and arm_compute::validate().

677 {
678  ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating StridedSlice node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
679  ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 1);
680  ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);
681 
682  // Extract IO and info
683  arm_compute::ITensorInfo *input  = get_backing_tensor_info(node.input(0));
684  arm_compute::ITensorInfo *output = get_backing_tensor_info(node.output(0));
685  const Coordinates starts = node.starts();
686  const Coordinates ends = node.ends();
687  const BiStrides strides = node.strides();
688  const StridedSliceLayerInfo info = node.strided_slice_info();
689 
690  return StridedSliceLayer::validate(input, output, starts, ends, strides, info.begin_mask(), info.end_mask(), info.shrink_axis_mask());
691 }
arm_compute::ITensorInfo * get_backing_tensor_info(arm_compute::graph::Tensor *tensor)
Returns backing tensor info of a given tensor.
Coordinates BiStrides
Bidirectional strides.
Definition: Types.h:53
Store the tensor's metadata.
Definition: ITensorInfo.h:40
#define ARM_COMPUTE_RETURN_ERROR_ON(cond)
If the condition is true, an error is returned.
Definition: Error.h:296
ScaleKernelInfo info(interpolation_policy, default_border_mode, PixelValue(), sampling_policy, false)
#define ARM_COMPUTE_LOG_GRAPH_VERBOSE(x)
Definition: Logger.h:50
Status validate(const ITensorInfo *scores_in, const ITensorInfo *boxes_in, const ITensorInfo *batch_splits_in, const ITensorInfo *scores_out, const ITensorInfo *boxes_out, const ITensorInfo *classes, const ITensorInfo *batch_splits_out, const ITensorInfo *keeps, const ITensorInfo *keeps_size, const BoxNMSLimitInfo info)

◆ validate_unary_eltwise_layer()

Status arm_compute::graph::backends::detail::validate_unary_eltwise_layer ( UnaryEltwiseLayerNode node)

Validates a unary element-wise layer node.

Parameters
[in] node — Node to validate
Returns
Status

Definition at line 750 of file ValidateHelpers.h.

References ARM_COMPUTE_ERROR, ARM_COMPUTE_LOG_GRAPH_VERBOSE, ARM_COMPUTE_RETURN_ERROR_ON, UnaryEltwiseLayerNode::eltwise_descriptor(), arm_compute::graph::Exp, get_backing_tensor_info(), INode::id(), INode::input(), arm_compute::test::validation::input, INode::name(), INode::num_inputs(), INode::num_outputs(), UnaryEltwiseLayerDescriptor::op, INode::output(), and arm_compute::validate().

751 {
752  ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating EltwiseLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
753  ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 1);
754  ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);
755 
756  // Extract input and output
757  arm_compute::ITensorInfo *input  = get_backing_tensor_info(node.input(0));
758  arm_compute::ITensorInfo *output = get_backing_tensor_info(node.output(0));
759  const UnaryEltwiseOperation eltwise_op = node.eltwise_descriptor().op;
760 
761  // Validate function
762  if(eltwise_op == UnaryEltwiseOperation::Exp)
763  {
764  ARM_COMPUTE_RETURN_ON_ERROR(UnaryEltwiseLayer::validate(input, output));
765  }
766  else
767  {
768  ARM_COMPUTE_ERROR("Unsupported unary element-wise operation!");
769  }
770 
771  return Status{};
772 }
arm_compute::ITensorInfo * get_backing_tensor_info(arm_compute::graph::Tensor *tensor)
Returns backing tensor info of a given tensor.
#define ARM_COMPUTE_ERROR(msg)
Print the given message then throw an std::runtime_error.
Definition: Error.h:352
Store the tensor's metadata.
Definition: ITensorInfo.h:40
#define ARM_COMPUTE_RETURN_ERROR_ON(cond)
If the condition is true, an error is returned.
Definition: Error.h:296
#define ARM_COMPUTE_LOG_GRAPH_VERBOSE(x)
Definition: Logger.h:50
Status validate(const ITensorInfo *scores_in, const ITensorInfo *boxes_in, const ITensorInfo *batch_splits_in, const ITensorInfo *scores_out, const ITensorInfo *boxes_out, const ITensorInfo *classes, const ITensorInfo *batch_splits_out, const ITensorInfo *keeps, const ITensorInfo *keeps_size, const BoxNMSLimitInfo info)
UnaryEltwiseOperation
Supported Unary Element-wise operations.
Definition: Types.h:120