Compute Library
 22.11
arm_compute::graph::backends::detail Namespace Reference

Data Structures

class  BackendRegistrar
 Helper class to statically register a backend. More...
 

Functions

template<typename TargetInfo >
TargetInfo::TensorType * get_backing_tensor (arm_compute::graph::Tensor *tensor)
 Returns backing tensor of a given tensor. More...
 
template<typename TargetInfo >
void validate_node (const INode &node, size_t num_expected_inputs, size_t num_expected_outputs)
 
template<typename ActivationLayerFunction , typename TargetInfo >
std::unique_ptr< IFunction > create_activation_layer (ActivationLayerNode &node)
 Creates a backend activation layer function. More...
 
template<typename ArgMinMaxLayerFunction , typename TargetInfo >
std::unique_ptr< IFunction > create_arg_min_max_layer (ArgMinMaxLayerNode &node)
 Creates a backend argminmax layer function. More...
 
template<typename BatchNormalizationLayerFunction , typename TargetInfo >
std::unique_ptr< IFunction > create_batch_normalization_layer (BatchNormalizationLayerNode &node)
 Create a backend batch normalization layer function. More...
 
template<typename FusedLayerTypes , typename TargetInfo >
std::unique_ptr< IFunction > create_fused_convolution_batch_normalization_layer (FusedConvolutionBatchNormalizationNode &node, GraphContext &ctx)
 Create a backend fused convolution batch normalization layer function. More...
 
template<typename FusedLayerTypes , typename TargetInfo >
std::unique_ptr< IFunction > create_fused_depthwise_convolution_batch_normalization_layer (FusedDepthwiseConvolutionBatchNormalizationNode &node, GraphContext &ctx)
 Create a backend fused depthwise convolution batch normalization layer function. More...
 
template<typename BoundingBoxTransformLayerFunction , typename TargetInfo >
std::unique_ptr< IFunction > create_bounding_box_transform_layer (BoundingBoxTransformLayerNode &node)
 Create a backend bounding box transform layer function. More...
 
template<typename ChannelShuffleLayerFunction , typename TargetInfo >
std::unique_ptr< IFunction > create_channel_shuffle_layer (ChannelShuffleLayerNode &node)
 Create a backend channel shuffle layer function. More...
 
template<typename ConcatenateLayerFunction , typename TargetInfo >
std::unique_ptr< arm_compute::IFunction > create_concatenate_layer (ConcatenateLayerNode &node)
 Create a backend layer concatenate function. More...
 
template<typename ConvolutionLayerFunctions , typename TargetInfo >
std::unique_ptr< IFunction > create_convolution_layer (ConvolutionLayerNode &node, GraphContext &ctx)
 Create a backend convolution layer function. More...
 
template<typename ConvolutionLayerFunctions , typename TargetInfo >
std::unique_ptr< IFunction > create_fused_convolution_with_post_op (FusedConvolutionWithPostOpNode &node, GraphContext &ctx)
 Create a backend convolution layer function with post operator. More...
 
template<typename FusedLayerTypes , typename TargetInfo >
std::unique_ptr< IFunction > create_fused_convolution_batch_normalization_with_post_op (FusedConvolutionBatchNormalizationWithPostOpsNode &node, GraphContext &ctx)
 Create a backend convolution batch normalization layer function with post operator. More...
 
template<typename DeconvolutionLayerFunction , typename TargetInfo >
std::unique_ptr< IFunction > create_deconvolution_layer (DeconvolutionLayerNode &node, GraphContext &ctx)
 Create a backend deconvolution layer function. More...
 
template<typename DepthwiseConvolutionLayer , typename TargetInfo >
std::unique_ptr< IFunction > create_depthwise_convolution_layer (DepthwiseConvolutionLayerNode &node)
 Create a backend layer depth-wise convolution function. More...
 
template<typename DepthToSpaceLayerFunction , typename TargetInfo >
std::unique_ptr< IFunction > create_depth_to_space_layer (DepthToSpaceLayerNode &node)
 Create a backend depth to space layer function. More...
 
template<typename DequantizationLayerFunction , typename TargetInfo >
std::unique_ptr< IFunction > create_dequantization_layer (DequantizationLayerNode &node)
 Create a backend dequantize layer function. More...
 
template<typename DetectionOutputLayerFunction , typename TargetInfo >
std::unique_ptr< IFunction > create_detection_output_layer (DetectionOutputLayerNode &node)
 Create a backend detection output layer function. More...
 
template<typename DetectionPostProcessLayerFunction , typename TargetInfo >
std::unique_ptr< IFunction > create_detection_post_process_layer (DetectionPostProcessLayerNode &node)
 Create a backend detection post process layer function. More...
 
template<typename EltwiseFunctions , typename TargetInfo >
std::unique_ptr< IFunction > create_eltwise_layer (EltwiseLayerNode &node)
 Create a backend element-wise operation layer function. More...
 
template<typename UnaryEltwiseFunctions , typename TargetInfo >
std::unique_ptr< IFunction > create_unary_eltwise_layer (UnaryEltwiseLayerNode &node)
 Create a backend unary element-wise operation layer function. More...
 
template<typename FlattenLayerFunction , typename TargetInfo >
std::unique_ptr< IFunction > create_flatten_layer (FlattenLayerNode &node)
 Create a backend flatten layer function. More...
 
template<typename FullyConnectedLayerFunction , typename TargetInfo >
std::unique_ptr< IFunction > create_fully_connected_layer (FullyConnectedLayerNode &node, GraphContext &ctx)
 Create a backend fully connected layer function. More...
 
template<typename GenerateProposalsLayerFunction , typename TargetInfo >
std::unique_ptr< IFunction > create_generate_proposals_layer (GenerateProposalsLayerNode &node, GraphContext &ctx)
 Create a backend generate proposals layer function. More...
 
template<typename L2NormalizeLayerFunction , typename TargetInfo >
std::unique_ptr< IFunction > create_l2_normalize_layer (L2NormalizeLayerNode &node, GraphContext &ctx)
 Create a backend l2 normalization layer function. More...
 
template<typename NormalizationLayerFunction , typename TargetInfo >
std::unique_ptr< IFunction > create_normalization_layer (NormalizationLayerNode &node, GraphContext &ctx)
 Create a backend normalization layer function. More...
 
template<typename NormalizePlanarYUVLayerFunction , typename TargetInfo >
std::unique_ptr< IFunction > create_normalize_planar_yuv_layer (NormalizePlanarYUVLayerNode &node)
 Create a backend normalize planar YUV layer function. More...
 
template<typename PadLayerFunction , typename TargetInfo >
std::unique_ptr< IFunction > create_pad_layer (PadLayerNode &node)
 Create a backend pad layer function. More...
 
template<typename PermuteLayerFunction , typename TargetInfo >
std::unique_ptr< IFunction > create_permute_layer (PermuteLayerNode &node)
 Create a backend permute layer function. More...
 
template<typename PoolingLayerFunction , typename TargetInfo >
std::unique_ptr< IFunction > create_pooling_layer (PoolingLayerNode &node)
 Create a backend pooling layer function. More...
 
template<typename PReluFunction , typename TargetInfo >
std::unique_ptr< IFunction > create_prelu_layer (PReluLayerNode &node)
 Create a backend PRelu layer function. More...
 
template<typename TargetInfo >
std::unique_ptr< IFunction > create_print_layer (PrintLayerNode &node)
 Create a backend print layer function. More...
 
template<typename PriorBoxLayerFunction , typename TargetInfo >
std::unique_ptr< IFunction > create_priorbox_layer (PriorBoxLayerNode &node)
 Create a backend priorbox layer function. More...
 
template<typename QuantizationLayerFunction , typename TargetInfo >
std::unique_ptr< IFunction > create_quantization_layer (QuantizationLayerNode &node)
 Create a backend quantization layer function. More...
 
template<typename ReductionOperationFunction , typename TargetInfo >
std::unique_ptr< IFunction > create_reduction_operation_layer (ReductionLayerNode &node, GraphContext &ctx)
 Create a backend reduction operation layer function. More...
 
template<typename ReorgLayerFunction , typename TargetInfo >
std::unique_ptr< IFunction > create_reorg_layer (ReorgLayerNode &node)
 Create a backend reorg layer function. More...
 
template<typename ReshapeLayerFunction , typename TargetInfo >
std::unique_ptr< IFunction > create_reshape_layer (ReshapeLayerNode &node)
 Create a backend reshape layer function. More...
 
template<typename ResizeLayerFunction , typename TargetInfo >
std::unique_ptr< IFunction > create_resize_layer (ResizeLayerNode &node)
 Create a backend resize layer function. More...
 
template<typename ROIAlignLayerFunction , typename TargetInfo >
std::unique_ptr< IFunction > create_roi_align_layer (ROIAlignLayerNode &node)
 Create a backend ROI align layer function. More...
 
template<typename SliceLayerFunction , typename TargetInfo >
std::unique_ptr< IFunction > create_slice_layer (SliceLayerNode &node)
 Create a backend slice layer function. More...
 
template<typename SoftmaxLayerFunction , typename TargetInfo >
std::unique_ptr< IFunction > create_softmax_layer (SoftmaxLayerNode &node, GraphContext &ctx)
 Create a backend softmax layer function. More...
 
template<typename StackLayerFunction , typename TargetInfo >
std::unique_ptr< arm_compute::IFunction > create_stack_layer (StackLayerNode &node)
 Create a backend layer stack function. More...
 
template<typename StridedSliceLayerFunction , typename TargetInfo >
std::unique_ptr< IFunction > create_strided_slice_layer (StridedSliceLayerNode &node)
 Create a backend strided slice layer function. More...
 
arm_compute::ITensorInfo * get_backing_tensor_info (arm_compute::graph::Tensor *tensor)
 Returns backing tensor info of a given tensor. More...
 
template<typename ArgMinMaxLayer >
Status validate_arg_min_max_layer (ArgMinMaxLayerNode &node)
 Validates a ArgMinMax layer node. More...
 
template<typename BoundingBoxTransformLayer >
Status validate_bounding_box_transform_layer (BoundingBoxTransformLayerNode &node)
 Validates a Bounding Box Transform layer node. More...
 
template<typename ChannelShuffleLayer >
Status validate_channel_shuffle_layer (ChannelShuffleLayerNode &node)
 Validates a Channel Shuffle layer node. More...
 
template<typename ConvolutionLayer , typename DirectConvolutionLayer , typename GEMMConvolutionLayer , typename WinogradConvolutionLayer >
Status validate_convolution_layer (ConvolutionLayerNode &node)
 Validates a Convolution layer node. More...
 
template<typename GEMMConvolutionLayer >
Status validate_fused_convolution_with_post_op (FusedConvolutionWithPostOpNode &node)
 Validates a Convolution layer node with post operator. More...
 
template<typename DepthwiseConvolutionLayer >
Status validate_depthwise_convolution_layer (DepthwiseConvolutionLayerNode &node)
 Validates a Depthwise Convolution layer node. More...
 
template<typename DepthToSpaceLayer >
Status validate_depth_to_space_layer (DepthToSpaceLayerNode &node)
 Validates a depth to space layer node. More...
 
template<typename DequantizationLayer >
Status validate_dequantization_layer (DequantizationLayerNode &node)
 Validates a dequantize layer node. More...
 
template<typename DetectionOutputLayer >
Status validate_detection_output_layer (DetectionOutputLayerNode &node)
 Validates a detection output layer node. More...
 
template<typename DetectionPostProcessLayer >
Status validate_detection_post_process_layer (DetectionPostProcessLayerNode &node)
 Validates a detection post process layer node. More...
 
template<typename GenerateProposalsLayer >
Status validate_generate_proposals_layer (GenerateProposalsLayerNode &node)
 Validates a Generate Proposals layer node. More...
 
template<typename L2NormalizeLayer >
Status validate_l2_normalize_layer (L2NormalizeLayerNode &node)
 Validates a L2Normalization layer node. More...
 
template<typename NormalizePlanarYUVLayer >
Status validate_normalize_planar_yuv_layer (NormalizePlanarYUVLayerNode &node)
 Validates a NormalizePlanarYUV layer node. More...
 
template<typename PadLayer >
Status validate_pad_layer (PadLayerNode &node)
 Validates a pad layer node. More...
 
template<typename PermuteLayer >
Status validate_permute_layer (PermuteLayerNode &node)
 Validates a permute layer node. More...
 
template<typename PReluLayer >
Status validate_prelu_layer (PReluLayerNode &node)
 Validates a PRelu layer node. More...
 
template<typename PriorBoxLayer >
Status validate_priorbox_layer (PriorBoxLayerNode &node)
 Validates a priorbox layer node. More...
 
template<typename QuantizationLayer >
Status validate_quantization_layer (QuantizationLayerNode &node)
 Validates a Quantization layer node. More...
 
template<typename ReductionLayer >
Status validate_reduction_operation_layer (ReductionLayerNode &node)
 Validates a Reduction operation layer node. More...
 
template<typename ReorgLayer >
Status validate_reorg_layer (ReorgLayerNode &node)
 Validates a Reorg layer node. More...
 
template<typename ReshapeLayer >
Status validate_reshape_layer (ReshapeLayerNode &node)
 Validates a Reshape layer node. More...
 
template<typename ROIAlignLayer >
Status validate_roi_align_layer (ROIAlignLayerNode &node)
 Validates a ROI Align layer node. More...
 
template<typename SliceLayer >
Status validate_slice_layer (SliceLayerNode &node)
 Validates a Slice layer node. More...
 
template<typename StridedSliceLayer >
Status validate_strided_slice_layer (StridedSliceLayerNode &node)
 Validates a Strided Slice layer node. More...
 
template<typename EltwiseLayerFunctions >
Status validate_eltwise_Layer (EltwiseLayerNode &node)
 Validates a element-wise layer node. More...
 
template<typename UnaryEltwiseLayerFunctions >
Status validate_unary_eltwise_layer (UnaryEltwiseLayerNode &node)
 Validates a unary element-wise layer node. More...
 
template<>
std::unique_ptr< IFunction > create_detection_output_layer< CPPDetectionOutputLayer, CLTargetInfo > (DetectionOutputLayerNode &node)
 
template<>
std::unique_ptr< IFunction > create_detection_post_process_layer< CPPDetectionPostProcessLayer, CLTargetInfo > (DetectionPostProcessLayerNode &node)
 
template<>
std::unique_ptr< IFunction > create_normalization_layer< NENormalizationLayer, NETargetInfo > (NormalizationLayerNode &node, GraphContext &ctx)
 

Function Documentation

◆ create_activation_layer()

std::unique_ptr<IFunction> arm_compute::graph::backends::detail::create_activation_layer ( ActivationLayerNode & node)

Creates a backend activation layer function.

Template Parameters
ActivationLayerFunction  Backend activation function
TargetInfo  Target-specific information
Parameters
[in] node  Node to create the backend function for
Returns
Backend activation layer function

Definition at line 102 of file FunctionHelpers.h.

References ARM_COMPUTE_LOG_GRAPH_INFO, arm_compute::test::validation::input, and arm_compute::graph::backends::is_in_place_operation().

103 {
104  validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
105 
106  // Extract IO and info
107  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
108  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
109  const ActivationLayerInfo act_info = node.activation_info();
110 
111  // Create function
112  auto func = std::make_unique<ActivationLayerFunction>();
113  func->configure(input, output, act_info);
114 
115  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
116  << node.name()
117  << " Type: " << node.type()
118  << " Target: " << TargetInfo::TargetType
119  << " Data Type: " << input->info()->data_type()
120  << " Shape: " << input->info()->tensor_shape()
121  << " Activation function: " << act_info.activation()
122  << " a: " << act_info.a()
123  << " b: " << act_info.b()
124  << " InPlace : " << is_in_place_operation(input, output)
125  << std::endl);
126 
127  return std::move(func);
128 }
TensorType
Memory type.
Definition: Types.h:38
#define ARM_COMPUTE_LOG_GRAPH_INFO(x)
Definition: Logger.h:54
bool is_in_place_operation(void *input, void *output)
Checks if an operation is in place.
Definition: Utils.h:77

◆ create_arg_min_max_layer()

std::unique_ptr<IFunction> arm_compute::graph::backends::detail::create_arg_min_max_layer ( ArgMinMaxLayerNode & node)

Creates a backend argminmax layer function.

Template Parameters
ArgMinMaxLayerFunction  Backend argminmax function
TargetInfo  Target-specific information
Parameters
[in] node  Node to create the backend function for
Returns
Backend argminmax layer function

Definition at line 140 of file FunctionHelpers.h.

References ARM_COMPUTE_LOG_GRAPH_INFO, and arm_compute::test::validation::input.

141 {
142  validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
143 
144  // Extract IO and info
145  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
146  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
147  const ReductionOperation op = node.reduction_operation();
148  unsigned int axis = node.axis();
149 
150  // Create function
151  auto func = std::make_unique<ArgMinMaxLayerFunction>();
152  func->configure(input, axis, output, op);
153 
154  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
155  << node.name()
156  << " Type: " << node.type()
157  << " Target: " << TargetInfo::TargetType
158  << " Data Type: " << input->info()->data_type()
159  << " Shape: " << input->info()->tensor_shape()
160  << " Reduction Operation: " << op
161  << " axis: " << axis
162  << std::endl);
163 
164  return std::move(func);
165 }
ReductionOperation
Available reduction operations.
Definition: Types.h:476
TensorType
Memory type.
Definition: Types.h:38
#define ARM_COMPUTE_LOG_GRAPH_INFO(x)
Definition: Logger.h:54

◆ create_batch_normalization_layer()

std::unique_ptr<IFunction> arm_compute::graph::backends::detail::create_batch_normalization_layer ( BatchNormalizationLayerNode & node)

Create a backend batch normalization layer function.

Template Parameters
BatchNormalizationLayerFunction  Backend batch normalization function
TargetInfo  Target-specific information
Parameters
[in] node  Node to create the backend function for
Returns
Backend batch normalization layer function

Definition at line 177 of file FunctionHelpers.h.

References ActivationLayerInfo::activation(), ARM_COMPUTE_LOG_GRAPH_INFO, ActivationLayerInfo::enabled(), arm_compute::quantization::epsilon, arm_compute::test::validation::input, arm_compute::graph::backends::is_in_place_operation(), and arm_compute::to_string().

178 {
179  validate_node<TargetInfo>(node, 5 /* expected inputs */, 1 /* expected outputs */);
180 
181  // Extract IO and info
182  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
183  typename TargetInfo::TensorType *mean = get_backing_tensor<TargetInfo>(node.input(1));
184  typename TargetInfo::TensorType *var = get_backing_tensor<TargetInfo>(node.input(2));
185  typename TargetInfo::TensorType *beta = get_backing_tensor<TargetInfo>(node.input(3));
186  typename TargetInfo::TensorType *gamma = get_backing_tensor<TargetInfo>(node.input(4));
187 
188  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
189  const float epsilon = node.epsilon();
190  const ActivationLayerInfo fused_act = node.fused_activation();
191 
192  // Create and configure function
193  auto func = std::make_unique<BatchNormalizationLayerFunction>();
194  func->configure(input, output, mean, var, beta, gamma, epsilon, fused_act);
195 
196  // Log info
197  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
198  << node.name()
199  << " Type: " << node.type()
200  << " Target: " << TargetInfo::TargetType
201  << " Data Type: " << input->info()->data_type()
202  << " Shape: " << input->info()->tensor_shape()
203  << " Epsilon: " << epsilon << " "
204  << (fused_act.enabled() ? to_string(fused_act.activation()) : "")
205  << " InPlace: " << is_in_place_operation(input, output)
206  << std::endl);
207 
208  return std::move(func);
209 }
TensorType
Memory type.
Definition: Types.h:38
std::string to_string(const CLBuildOptions &cl_build_opts)
Definition: Utils.h:51
#define ARM_COMPUTE_LOG_GRAPH_INFO(x)
Definition: Logger.h:54
bool is_in_place_operation(void *input, void *output)
Checks if an operation is in place.
Definition: Utils.h:77

◆ create_bounding_box_transform_layer()

std::unique_ptr<IFunction> arm_compute::graph::backends::detail::create_bounding_box_transform_layer ( BoundingBoxTransformLayerNode & node)

Create a backend bounding box transform layer function.

Template Parameters
BoundingBoxTransformLayerFunction  Backend bounding box transform function
TargetInfo  Target-specific information
Parameters
[in] node  Node to create the backend function for
Returns
Backend bounding box transform layer function

Definition at line 334 of file FunctionHelpers.h.

References ARM_COMPUTE_LOG_GRAPH_INFO, and arm_compute::test::validation::input.

335 {
336  validate_node<TargetInfo>(node, 2 /* expected inputs */, 1 /* expected outputs */);
337 
338  // Extract IO and info
339  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
340  typename TargetInfo::TensorType *deltas = get_backing_tensor<TargetInfo>(node.input(1));
341  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
342  const BoundingBoxTransformInfo bbox_info = node.info();
343 
344  // Create and configure function
345  auto func = std::make_unique<BoundingBoxTransformLayerFunction>();
346  func->configure(input, output, deltas, bbox_info);
347 
348  // Log info
349  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
350  << node.name()
351  << " Type: " << node.type()
352  << " Target: " << TargetInfo::TargetType
353  << " Data Type: " << input->info()->data_type()
354  << " Shape: " << input->info()->tensor_shape()
355  << " BoundingBox Info img W: " << bbox_info.img_width() << " "
356  << " BoundingBox Info img H: " << bbox_info.img_height() << " "
357  << std::endl);
358 
359  return std::move(func);
360 }
TensorType
Memory type.
Definition: Types.h:38
#define ARM_COMPUTE_LOG_GRAPH_INFO(x)
Definition: Logger.h:54

◆ create_channel_shuffle_layer()

std::unique_ptr<IFunction> arm_compute::graph::backends::detail::create_channel_shuffle_layer ( ChannelShuffleLayerNode & node)

Create a backend channel shuffle layer function.

Template Parameters
ChannelShuffleLayerFunction  Backend channel shuffle function
TargetInfo  Target-specific information
Parameters
[in] node  Node to create the backend function for
Returns
Backend channel shuffle layer function

Definition at line 372 of file FunctionHelpers.h.

References ARM_COMPUTE_LOG_GRAPH_INFO, arm_compute::test::validation::input, and arm_compute::test::validation::num_groups.

373 {
374  validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
375 
376  // Extract IO and info
377  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
378  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
379  const unsigned int num_groups = node.num_groups();
380 
381  // Create function
382  auto func = std::make_unique<ChannelShuffleLayerFunction>();
383  func->configure(input, output, num_groups);
384 
385  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
386  << node.name()
387  << " Type: " << node.type()
388  << " Target: " << TargetInfo::TargetType
389  << " Data Type: " << input->info()->data_type()
390  << " Shape: " << input->info()->tensor_shape()
391  << " Num groups: " << num_groups
392  << std::endl);
393 
394  return std::move(func);
395 }
TensorType
Memory type.
Definition: Types.h:38
#define ARM_COMPUTE_LOG_GRAPH_INFO(x)
Definition: Logger.h:54
const unsigned int num_groups
Definition: Im2Col.cpp:153

◆ create_concatenate_layer()

std::unique_ptr<arm_compute::IFunction> arm_compute::graph::backends::detail::create_concatenate_layer ( ConcatenateLayerNode & node)

Create a backend layer concatenate function.

Template Parameters
ConcatenateLayerFunction  Backend concatenate function
TargetInfo  Target-specific information
Parameters
[in] node  Node to create the backend function for
Returns
Backend concatenate layer function

Definition at line 407 of file FunctionHelpers.h.

References ARM_COMPUTE_ERROR_ON, ARM_COMPUTE_LOG_GRAPH_INFO, ARM_COMPUTE_LOG_GRAPH_VERBOSE, ConcatenateLayerNode::concatenation_axis(), arm_compute::test::validation::data_layout, Tensor::desc(), arm_compute::graph::get_dimension_idx(), INode::id(), INode::input(), arm_compute::is_data_type_quantized_asymmetric(), ConcatenateLayerNode::is_enabled(), TensorDescriptor::layout, INode::name(), INode::num_inputs(), INode::num_outputs(), INode::output(), ConcatenateLayerNode::type(), and arm_compute::UNKNOWN.

408 {
409  ARM_COMPUTE_LOG_GRAPH_VERBOSE("Creating Concatenate node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
410  ARM_COMPUTE_ERROR_ON(node.num_outputs() != 1);
411 
412  // Return nullptr if depth concatenate is switched off
413  if(!node.is_enabled())
414  {
415  return nullptr;
416  }
417 
418  // Extract IO and info
419  std::vector<typename TargetInfo::SrcTensorType *> inputs;
420  for(unsigned int i = 0; i < node.num_inputs(); ++i)
421  {
422  inputs.push_back(get_backing_tensor<TargetInfo>(node.input(i)));
423  }
424  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
425  const DataLayout data_layout = node.output(0) != nullptr ? node.output(0)->desc().layout : DataLayout::UNKNOWN;
426  const size_t concat_axis = get_dimension_idx(data_layout, node.concatenation_axis());
427 
428  // Create and configure function
429  auto func = std::make_unique<ConcatenateLayerFunction>();
430  func->configure(inputs, output, concat_axis);
431 
432  // Log info
433  const bool is_quantized = is_data_type_quantized_asymmetric(output->info()->data_type());
434  std::ostringstream qss;
435  if(is_quantized)
436  {
437  qss << " Output QuantInfo: " << output->info()->quantization_info();
438  }
439  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
440  << node.name()
441  << " Type: " << node.type()
442  << " Target: " << TargetInfo::TargetType
443  << " Data Type: " << output->info()->data_type()
444  << " Shape: " << output->info()->tensor_shape()
445  << " Num Inputs: " << inputs.size()
446  << " Axis: " << concat_axis
447  << qss.str()
448  << std::endl);
449 
450  return std::move(func);
451 }
Unknown CL kernel type.
Definition: CLTypes.h:82
TensorType
Memory type.
Definition: Types.h:38
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
Definition: Error.h:466
#define ARM_COMPUTE_LOG_GRAPH_INFO(x)
Definition: Logger.h:54
bool is_data_type_quantized_asymmetric(DataType dt)
Check if a given data type is of asymmetric quantized type.
Definition: Utils.h:1052
#define ARM_COMPUTE_LOG_GRAPH_VERBOSE(x)
Definition: Logger.h:50
DataLayout
[DataLayout enum definition]
Definition: Types.h:113
size_t get_dimension_idx(DataLayout data_layout, const DataLayoutDimension data_layout_dimension)
Get index of a tensor's given dimension depending on its layout.
Definition: Utils.cpp:148

◆ create_convolution_layer()

std::unique_ptr<IFunction> arm_compute::graph::backends::detail::create_convolution_layer ( ConvolutionLayerNode & node,
GraphContext & ctx 
)

Create a backend convolution layer function.

Template Parameters
ConvolutionLayerFunctions  Backend convolution functions
TargetInfo  Target-specific information
Parameters
[in] node  Node to create the backend function for
[in] ctx  Graph context
Returns
Backend convolution layer function

Definition at line 464 of file FunctionHelpers.h.

References ActivationLayerInfo::activation(), ARM_COMPUTE_ERROR_ON_MSG, ARM_COMPUTE_LOG_GRAPH_INFO, arm_compute::test::validation::conv_info, arm_compute::graph::Direct, arm_compute::graph::Enabled, ActivationLayerInfo::enabled(), arm_compute::graph::GEMM, arm_compute::graph::backends::get_memory_manager(), arm_compute::test::validation::input, arm_compute::is_data_type_quantized_asymmetric(), arm_compute::test::validation::num_groups, arm_compute::S32, arm_compute::to_string(), arm_compute::utils::cast::U, and arm_compute::graph::Winograd.

465 {
466  validate_node<TargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);
467 
468  // Extract IO and info
469  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
470  typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
471  typename TargetInfo::TensorType *biases = get_backing_tensor<TargetInfo>(node.input(2));
472  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
473 
474  const bool is_quantized = is_data_type_quantized_asymmetric(input->info()->data_type());
475 
476  if(is_quantized)
477  {
478  biases->info()->set_data_type(DataType::S32);
479  }
480 
481  const PadStrideInfo conv_info = node.convolution_info();
482  const unsigned int num_groups = node.num_groups();
483  const ConvolutionMethod conv_algorithm = node.convolution_method();
484  const bool fast_math = node.fast_math_hint() == FastMathHint::Enabled;
485  const ActivationLayerInfo fused_act = node.fused_activation();
486 
487  // Create and configure function (we assume that functions have been validated before creation)
488  std::shared_ptr<IMemoryManager> mm = get_memory_manager(ctx, TargetInfo::TargetType);
489  std::unique_ptr<IFunction> func;
490  std::string func_name;
491 
492  if(conv_algorithm == ConvolutionMethod::Winograd)
493  {
494  ARM_COMPUTE_ERROR_ON_MSG(num_groups != 1, "WinogradConvolutionLayer does not support grouping!");
495  std::tie(func, func_name) = create_named_memory_managed_function<typename ConvolutionLayerFunctions::WinogradConvolutionLayer>(
496  std::string("WinogradConvolutionLayer"), mm,
497  input, weights, biases, output, conv_info, fused_act, fast_math);
498  }
499  else if(conv_algorithm == ConvolutionMethod::Direct)
500  {
501  ARM_COMPUTE_ERROR_ON_MSG(num_groups != 1, "DirectConvolutionLayer does not support grouping!");
502  std::tie(func, func_name) = create_named_function<typename ConvolutionLayerFunctions::DirectConvolutionLayer>(
503  std::string("DirectConvolutionLayer"),
504  input, weights, biases, output, conv_info, fused_act);
505  }
506  else if(conv_algorithm == ConvolutionMethod::GEMM)
507  {
508  std::tie(func, func_name) = create_named_memory_managed_function<typename ConvolutionLayerFunctions::GEMMConvolutionLayer>(
509  std::string("GEMMConvolutionLayer"), mm,
510  input, weights, biases, output, conv_info,
511  WeightsInfo(), Size2D(1U, 1U), fused_act, num_groups);
512  }
513  else
514  {
515  std::tie(func, func_name) = create_named_memory_managed_function<typename ConvolutionLayerFunctions::GenericConvolutionLayer>(
516  std::string("GenericConvolutionLayer"), mm,
517  input, weights, biases, output, conv_info,
518  WeightsInfo(), Size2D(1U, 1U), fused_act, fast_math, num_groups);
519  }
520 
521  // Log info
522  std::ostringstream qss;
523  if(is_quantized)
524  {
525  qss << " Input QuantInfo: " << input->info()->quantization_info()
526  << " Weights QuantInfo: " << weights->info()->quantization_info()
527  << " Output QuantInfo: " << output->info()->quantization_info();
528  }
529  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
530  << node.name()
531  << " Type: " << func_name
532  << " Target: " << TargetInfo::TargetType
533  << " Data Type: " << input->info()->data_type()
534  << " Groups: " << num_groups
535  << " Input shape: " << input->info()->tensor_shape()
536  << " Weights shape: " << weights->info()->tensor_shape()
537  << " Output shape: " << output->info()->tensor_shape()
538  << qss.str()
539  << (fused_act.enabled() ? " " + to_string(fused_act.activation()) : "")
540  << std::endl);
541  return std::move(func);
542 }
TensorType
Memory type.
Definition: Types.h:38
std::string to_string(const CLBuildOptions &cl_build_opts)
Definition: Utils.h:51
ConvolutionMethod
Available ConvolutionMethod.
Definition: Types.h:134
#define ARM_COMPUTE_LOG_GRAPH_INFO(x)
Definition: Logger.h:54
#define ARM_COMPUTE_ERROR_ON_MSG(cond, msg)
Definition: Error.h:456
const unsigned int num_groups
Definition: Im2Col.cpp:153
bool is_data_type_quantized_asymmetric(DataType dt)
Check if a given data type is of asymmetric quantized type.
Definition: Utils.h:1052
GEMM CL kernel type.
Definition: CLTypes.h:86
std::shared_ptr< IMemoryManager > get_memory_manager(GraphContext &ctx, Target target)
Returns the memory manager for a given target.
Definition: Utils.h:89

◆ create_deconvolution_layer()

std::unique_ptr<IFunction> arm_compute::graph::backends::detail::create_deconvolution_layer ( DeconvolutionLayerNode node,
GraphContext ctx 
)

Create a backend deconvolution layer function.

Template Parameters
DeconvolutionLayerFunction — Backend deconvolution function
TargetInfo — Target-specific information
Parameters
[in] node — Node to create the backend function for
[in] ctx — Graph context
Returns
Backend deconvolution layer function

Definition at line 732 of file FunctionHelpers.h.

References ARM_COMPUTE_LOG_GRAPH_INFO, arm_compute::graph::backends::get_memory_manager(), and arm_compute::test::validation::input.

733 {
734  validate_node<TargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);
735 
736  // Extract IO and info
737  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
738  typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
739  typename TargetInfo::TensorType *biases = get_backing_tensor<TargetInfo>(node.input(2));
740  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
741 
742  const PadStrideInfo deconv_info = node.deconvolution_info();
743 
744  // Create and configure function (we assume that functions have been validated before creation)
745  std::shared_ptr<IMemoryManager> mm = get_memory_manager(ctx, TargetInfo::TargetType);
746  std::unique_ptr<IFunction> func;
747 
748  std::tie(func, std::ignore) = create_named_memory_managed_function<DeconvolutionLayerFunction>(
749  std::string(), mm,
750  input, weights, biases, output, deconv_info);
751 
752  // Log info
753  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
754  << node.name()
755  << " Type: " << node.type()
756  << " Target: " << TargetInfo::TargetType
757  << " Data Type: " << input->info()->data_type()
758  << " Input shape: " << input->info()->tensor_shape()
759  << " Weights shape: " << weights->info()->tensor_shape()
760  << " Output shape: " << output->info()->tensor_shape()
761  << std::endl);
762  return func;
763 }
TensorType
Memory type.
Definition: Types.h:38
#define ARM_COMPUTE_LOG_GRAPH_INFO(x)
Definition: Logger.h:54
std::shared_ptr< IMemoryManager > get_memory_manager(GraphContext &ctx, Target target)
Returns the memory manager for a given target.
Definition: Utils.h:89

◆ create_depth_to_space_layer()

std::unique_ptr<IFunction> arm_compute::graph::backends::detail::create_depth_to_space_layer ( DepthToSpaceLayerNode node)

Create a backend depth to space layer function.

Template Parameters
DepthToSpaceLayerNodeFunction — Backend depth to space function
TargetInfo — Target-specific information
Parameters
[in] node — Node to create the backend function for
Returns
Backend depth to space layer function

Definition at line 837 of file FunctionHelpers.h.

References ARM_COMPUTE_ERROR_ON, ARM_COMPUTE_LOG_GRAPH_INFO, and arm_compute::test::validation::input.

838 {
839  validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
840 
841  // Extract IO and info
842  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
843  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
844 
845  ARM_COMPUTE_ERROR_ON(input == nullptr);
846  ARM_COMPUTE_ERROR_ON(output == nullptr);
847 
848  // Create and configure function
849  auto func = std::make_unique<DepthToSpaceLayerFunction>();
850  func->configure(input, output, node.block_shape());
851 
852  // Log info
853  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
854  << node.name()
855  << " Type: " << node.type()
856  << " Target: " << TargetInfo::TargetType
857  << " Data Type: " << input->info()->data_type()
858  << " Input shape: " << input->info()->tensor_shape()
859  << " Block Size: " << node.block_shape()
860  << " Output shape: " << output->info()->tensor_shape()
861  << std::endl);
862 
863  return std::move(func);
864 }
TensorType
Memory type.
Definition: Types.h:38
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
Definition: Error.h:466
#define ARM_COMPUTE_LOG_GRAPH_INFO(x)
Definition: Logger.h:54

◆ create_depthwise_convolution_layer()

std::unique_ptr<IFunction> arm_compute::graph::backends::detail::create_depthwise_convolution_layer ( DepthwiseConvolutionLayerNode node)

Create a backend layer depth-wise convolution function.

Template Parameters
DepthwiseConvolutionLayerFunctions — Backend depthwise convolution function
TargetInfo — Target-specific information
Parameters
[in] node — Node to create the backend function for
Returns
Backend depth-wise convolution layer function

Definition at line 775 of file FunctionHelpers.h.

References ActivationLayerInfo::activation(), ARM_COMPUTE_LOG_GRAPH_INFO, arm_compute::test::validation::conv_info, ActivationLayerInfo::enabled(), arm_compute::test::validation::input, arm_compute::is_data_type_quantized_asymmetric(), arm_compute::S32, and arm_compute::to_string().

776 {
777  validate_node<TargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);
778 
779  // Extract IO and info
780  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
781  typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
782  typename TargetInfo::TensorType *biases = get_backing_tensor<TargetInfo>(node.input(2));
783  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
784 
785  const bool is_quantized = is_data_type_quantized_asymmetric(input->info()->data_type());
786 
787  if(is_quantized)
788  {
789  biases->info()->set_data_type(DataType::S32);
790  }
791 
792  const PadStrideInfo conv_info = node.convolution_info();
793  const unsigned int depth_multiplier = node.depth_multiplier();
794  const ActivationLayerInfo fused_act = node.fused_activation();
795 
796  // Create and configure function (we assume that functions have been validated before creation)
797  std::unique_ptr<IFunction> func;
798  std::string func_name;
799 
800  std::tie(func, func_name) = create_named_function<DepthwiseConvolutionLayer>(
801  std::string("DepthwiseConvolutionLayer"),
802  input, weights, biases, output, conv_info, depth_multiplier, fused_act);
803 
804  // Log info
805  std::ostringstream qss;
806  if(is_quantized)
807  {
808  qss << " Input QuantInfo: " << input->info()->quantization_info()
809  << " Weights QuantInfo: " << weights->info()->quantization_info()
810  << " Output QuantInfo: " << output->info()->quantization_info();
811  }
812  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
813  << node.name()
814  << " Type: " << func_name
815  << " Target: " << TargetInfo::TargetType
816  << " Data Type: " << input->info()->data_type()
817  << " Input shape: " << input->info()->tensor_shape()
818  << " Weights shape: " << weights->info()->tensor_shape()
819  << " Output shape: " << output->info()->tensor_shape()
820  << " Depth multiplier: " << depth_multiplier
821  << qss.str()
822  << (fused_act.enabled() ? " " + to_string(fused_act.activation()) : "")
823  << std::endl);
824  return std::move(func);
825 }
TensorType
Memory type.
Definition: Types.h:38
std::string to_string(const CLBuildOptions &cl_build_opts)
Definition: Utils.h:51
#define ARM_COMPUTE_LOG_GRAPH_INFO(x)
Definition: Logger.h:54
bool is_data_type_quantized_asymmetric(DataType dt)
Check if a given data type is of asymmetric quantized type.
Definition: Utils.h:1052

◆ create_dequantization_layer()

std::unique_ptr<IFunction> arm_compute::graph::backends::detail::create_dequantization_layer ( DequantizationLayerNode node)

Create a backend dequantize layer function.

Template Parameters
DequantizationLayerFunction — Backend dequantize function
TargetInfo — Target-specific information
Parameters
[in] node — Node to create the backend function for
Returns
Backend dequantize layer function

Definition at line 876 of file FunctionHelpers.h.

References ARM_COMPUTE_ERROR_ON, ARM_COMPUTE_LOG_GRAPH_INFO, and arm_compute::test::validation::input.

877 {
878  validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
879 
880  // Extract IO and info
881  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
882  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
883 
884  ARM_COMPUTE_ERROR_ON(input == nullptr);
885  ARM_COMPUTE_ERROR_ON(output == nullptr);
886 
887  // Create and configure function
888  auto func = std::make_unique<DequantizationLayerFunction>();
889  func->configure(input, output);
890 
891  // Log info
892  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
893  << node.name()
894  << " Type: " << node.type()
895  << " Target: " << TargetInfo::TargetType
896  << " Data Type: " << input->info()->data_type()
897  << " Input shape: " << input->info()->tensor_shape()
898  << " Input quantization info: " << output->info()->quantization_info()
899  << " Output shape: " << output->info()->tensor_shape()
900  << std::endl);
901 
902  return std::move(func);
903 }
TensorType
Memory type.
Definition: Types.h:38
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
Definition: Error.h:466
#define ARM_COMPUTE_LOG_GRAPH_INFO(x)
Definition: Logger.h:54

◆ create_detection_output_layer()

std::unique_ptr<IFunction> arm_compute::graph::backends::detail::create_detection_output_layer ( DetectionOutputLayerNode node)

Create a backend detection output layer function.

Template Parameters
DetectionOutputLayerFunction — Backend detection output function
TargetInfo — Target-specific information
Parameters
[in] node — Node to create the backend function for
Returns
Backend detection output layer function

Definition at line 914 of file FunctionHelpers.h.

References ARM_COMPUTE_ERROR_ON, and ARM_COMPUTE_LOG_GRAPH_INFO.

915 {
916  validate_node<TargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);
917 
918  // Extract IO and info
919  typename TargetInfo::TensorType *input0 = get_backing_tensor<TargetInfo>(node.input(0));
920  typename TargetInfo::TensorType *input1 = get_backing_tensor<TargetInfo>(node.input(1));
921  typename TargetInfo::TensorType *input2 = get_backing_tensor<TargetInfo>(node.input(2));
922  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
923  const DetectionOutputLayerInfo detect_info = node.detection_output_info();
924 
925  ARM_COMPUTE_ERROR_ON(input0 == nullptr);
926  ARM_COMPUTE_ERROR_ON(input1 == nullptr);
927  ARM_COMPUTE_ERROR_ON(input2 == nullptr);
928  ARM_COMPUTE_ERROR_ON(output == nullptr);
929 
930  // Create and configure function
931  auto func = std::make_unique<DetectionOutputLayerFunction>();
932  func->configure(input0, input1, input2, output, detect_info);
933 
934  // Log info
935  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
936  << node.name()
937  << " Type: " << node.type()
938  << " Target: " << TargetInfo::TargetType
939  << " Data Type: " << input0->info()->data_type()
940  << " Input0 shape: " << input0->info()->tensor_shape()
941  << " Input1 shape: " << input1->info()->tensor_shape()
942  << " Input2 shape: " << input2->info()->tensor_shape()
943  << " Output shape: " << output->info()->tensor_shape()
944  << " DetectionOutputLayer info: " << detect_info
945  << std::endl);
946 
947  return std::move(func);
948 }
TensorType
Memory type.
Definition: Types.h:38
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
Definition: Error.h:466
#define ARM_COMPUTE_LOG_GRAPH_INFO(x)
Definition: Logger.h:54

◆ create_detection_output_layer< CPPDetectionOutputLayer, CLTargetInfo >()

Definition at line 130 of file CLFunctionsFactory.cpp.

References ARM_COMPUTE_ERROR_ON, and ARM_COMPUTE_LOG_GRAPH_INFO.

Referenced by CLFunctionFactory::create().

131 {
132  validate_node<CLTargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);
133 
134  // Extract IO and info
135  CLTargetInfo::TensorType *input0 = get_backing_tensor<CLTargetInfo>(node.input(0));
136  CLTargetInfo::TensorType *input1 = get_backing_tensor<CLTargetInfo>(node.input(1));
137  CLTargetInfo::TensorType *input2 = get_backing_tensor<CLTargetInfo>(node.input(2));
138  CLTargetInfo::TensorType *output = get_backing_tensor<CLTargetInfo>(node.output(0));
139  const DetectionOutputLayerInfo detect_info = node.detection_output_info();
140 
141  ARM_COMPUTE_ERROR_ON(input0 == nullptr);
142  ARM_COMPUTE_ERROR_ON(input1 == nullptr);
143  ARM_COMPUTE_ERROR_ON(input2 == nullptr);
144  ARM_COMPUTE_ERROR_ON(output == nullptr);
145 
146  // Create and configure function
147  auto func = std::make_unique<CPPDetectionOutputLayer>();
148  func->configure(input0, input1, input2, output, detect_info);
149 
150  // Log info
151  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
152  << node.name()
153  << " Type: " << node.type()
154  << " Target: " << CLTargetInfo::TargetType
155  << " Data Type: " << input0->info()->data_type()
156  << " Input0 shape: " << input0->info()->tensor_shape()
157  << " Input1 shape: " << input1->info()->tensor_shape()
158  << " Input2 shape: " << input2->info()->tensor_shape()
159  << " Output shape: " << output->info()->tensor_shape()
160  << " DetectionOutputLayer info: " << detect_info
161  << std::endl);
162 
163  auto wrap_function = std::make_unique<CPPWrapperFunction>();
164 
165  wrap_function->register_function(std::move(func));
166  wrap_function->register_tensor(input0);
167  wrap_function->register_tensor(input1);
168  wrap_function->register_tensor(input2);
169  wrap_function->register_tensor(output);
170 
171  return std::move(wrap_function);
172 }
TensorType
Memory type.
Definition: Types.h:38
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
Definition: Error.h:466
#define ARM_COMPUTE_LOG_GRAPH_INFO(x)
Definition: Logger.h:54

◆ create_detection_post_process_layer()

std::unique_ptr<IFunction> arm_compute::graph::backends::detail::create_detection_post_process_layer ( DetectionPostProcessLayerNode node)

Create a backend detection post process layer function.

Template Parameters
DetectionPostProcessLayerFunction — Backend detection output function
TargetInfo — Target-specific information
Parameters
[in] node — Node to create the backend function for
Returns
Backend detection post process layer function

Definition at line 960 of file FunctionHelpers.h.

References ARM_COMPUTE_ERROR_ON, and ARM_COMPUTE_LOG_GRAPH_INFO.

961 {
962  validate_node<TargetInfo>(node, 3 /* expected inputs */, 4 /* expected outputs */);
963 
964  // Extract IO and info
965  typename TargetInfo::TensorType *input0 = get_backing_tensor<TargetInfo>(node.input(0));
966  typename TargetInfo::TensorType *input1 = get_backing_tensor<TargetInfo>(node.input(1));
967  typename TargetInfo::TensorType *input2 = get_backing_tensor<TargetInfo>(node.input(2));
968  typename TargetInfo::TensorType *output0 = get_backing_tensor<TargetInfo>(node.output(0));
969  typename TargetInfo::TensorType *output1 = get_backing_tensor<TargetInfo>(node.output(1));
970  typename TargetInfo::TensorType *output2 = get_backing_tensor<TargetInfo>(node.output(2));
971  typename TargetInfo::TensorType *output3 = get_backing_tensor<TargetInfo>(node.output(3));
972  const DetectionPostProcessLayerInfo detect_info = node.detection_post_process_info();
973 
974  ARM_COMPUTE_ERROR_ON(input0 == nullptr);
975  ARM_COMPUTE_ERROR_ON(input1 == nullptr);
976  ARM_COMPUTE_ERROR_ON(input2 == nullptr);
977  ARM_COMPUTE_ERROR_ON(output0 == nullptr);
978  ARM_COMPUTE_ERROR_ON(output1 == nullptr);
979  ARM_COMPUTE_ERROR_ON(output2 == nullptr);
980  ARM_COMPUTE_ERROR_ON(output3 == nullptr);
981 
982  // Create and configure function
983  auto func = std::make_unique<DetectionPostProcessLayerFunction>();
984  func->configure(input0, input1, input2, output0, output1, output2, output3, detect_info);
985 
986  // Log info
987  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
988  << node.name()
989  << " Type: " << node.type()
990  << " Target: " << TargetInfo::TargetType
991  << " Data Type: " << input0->info()->data_type()
992  << " Input0 shape: " << input0->info()->tensor_shape()
993  << " Input1 shape: " << input1->info()->tensor_shape()
994  << " Input2 shape: " << input2->info()->tensor_shape()
995  << " Output0 shape: " << output0->info()->tensor_shape()
996  << " Output1 shape: " << output1->info()->tensor_shape()
997  << " Output2 shape: " << output2->info()->tensor_shape()
998  << " Output3 shape: " << output3->info()->tensor_shape()
999  << " DetectionPostProcessLayer info: " << detect_info
1000  << std::endl);
1001 
1002  return std::move(func);
1003 }
TensorType
Memory type.
Definition: Types.h:38
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
Definition: Error.h:466
#define ARM_COMPUTE_LOG_GRAPH_INFO(x)
Definition: Logger.h:54

◆ create_detection_post_process_layer< CPPDetectionPostProcessLayer, CLTargetInfo >()

Definition at line 174 of file CLFunctionsFactory.cpp.

References ARM_COMPUTE_ERROR_ON, and ARM_COMPUTE_LOG_GRAPH_INFO.

Referenced by CLFunctionFactory::create().

175 {
176  validate_node<CLTargetInfo>(node, 3 /* expected inputs */, 4 /* expected outputs */);
177 
178  // Extract IO and info
179  CLTargetInfo::TensorType *input0 = get_backing_tensor<CLTargetInfo>(node.input(0));
180  CLTargetInfo::TensorType *input1 = get_backing_tensor<CLTargetInfo>(node.input(1));
181  CLTargetInfo::TensorType *input2 = get_backing_tensor<CLTargetInfo>(node.input(2));
182  CLTargetInfo::TensorType *output0 = get_backing_tensor<CLTargetInfo>(node.output(0));
183  CLTargetInfo::TensorType *output1 = get_backing_tensor<CLTargetInfo>(node.output(1));
184  CLTargetInfo::TensorType *output2 = get_backing_tensor<CLTargetInfo>(node.output(2));
185  CLTargetInfo::TensorType *output3 = get_backing_tensor<CLTargetInfo>(node.output(3));
186  const DetectionPostProcessLayerInfo detect_info = node.detection_post_process_info();
187 
188  ARM_COMPUTE_ERROR_ON(input0 == nullptr);
189  ARM_COMPUTE_ERROR_ON(input1 == nullptr);
190  ARM_COMPUTE_ERROR_ON(input2 == nullptr);
191  ARM_COMPUTE_ERROR_ON(output0 == nullptr);
192  ARM_COMPUTE_ERROR_ON(output1 == nullptr);
193  ARM_COMPUTE_ERROR_ON(output2 == nullptr);
194  ARM_COMPUTE_ERROR_ON(output3 == nullptr);
195 
196  // Create and configure function
197  auto func = std::make_unique<CPPDetectionPostProcessLayer>();
198  func->configure(input0, input1, input2, output0, output1, output2, output3, detect_info);
199 
200  // Log info
201  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
202  << node.name()
203  << " Type: " << node.type()
204  << " Target: " << CLTargetInfo::TargetType
205  << " Data Type: " << input0->info()->data_type()
206  << " Input0 shape: " << input0->info()->tensor_shape()
207  << " Input1 shape: " << input1->info()->tensor_shape()
208  << " Input2 shape: " << input2->info()->tensor_shape()
209  << " Output0 shape: " << output0->info()->tensor_shape()
210  << " Output1 shape: " << output1->info()->tensor_shape()
211  << " Output2 shape: " << output2->info()->tensor_shape()
212  << " Output3 shape: " << output3->info()->tensor_shape()
213  << " DetectionPostProcessLayer info: " << detect_info
214  << std::endl);
215 
216  auto wrap_function = std::make_unique<CPPWrapperFunction>();
217 
218  wrap_function->register_function(std::move(func));
219  wrap_function->register_tensor(input0);
220  wrap_function->register_tensor(input1);
221  wrap_function->register_tensor(input2);
222  wrap_function->register_tensor(output0);
223  wrap_function->register_tensor(output1);
224  wrap_function->register_tensor(output2);
225  wrap_function->register_tensor(output3);
226 
227  return std::move(wrap_function);
228 }
TensorType
Memory type.
Definition: Types.h:38
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
Definition: Error.h:466
#define ARM_COMPUTE_LOG_GRAPH_INFO(x)
Definition: Logger.h:54

◆ create_eltwise_layer()

std::unique_ptr<IFunction> arm_compute::graph::backends::detail::create_eltwise_layer ( EltwiseLayerNode node)

Create a backend element-wise operation layer function.

Template Parameters
EltwiseFunctions — Backend element-wise function
TargetInfo — Target-specific information
Parameters
[in] node — Node to create the backend function for
Returns
Backend element-wise operation layer function

Definition at line 1015 of file FunctionHelpers.h.

References arm_compute::graph::Add, ARM_COMPUTE_ERROR, ARM_COMPUTE_ERROR_ON, ARM_COMPUTE_LOG_GRAPH_INFO, arm_compute::graph::Div, arm_compute::graph::Max, arm_compute::graph::Mul, and arm_compute::graph::Sub.

1016 {
1017  validate_node<TargetInfo>(node, 2 /* expected inputs */, 1 /* expected outputs */);
1018 
1019  // Extract IO and info
1020  typename TargetInfo::TensorType *input1 = get_backing_tensor<TargetInfo>(node.input(0));
1021  typename TargetInfo::TensorType *input2 = get_backing_tensor<TargetInfo>(node.input(1));
1022  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1023  const EltwiseOperation eltwise_op = node.eltwise_operation();
1024  const ConvertPolicy convert_policy = node.convert_policy();
1025  const ActivationLayerInfo act_info = node.fused_activation();
1026  ARM_COMPUTE_ERROR_ON(input1 == nullptr);
1027  ARM_COMPUTE_ERROR_ON(input2 == nullptr);
1028  ARM_COMPUTE_ERROR_ON(output == nullptr);
1029 
1030  std::unique_ptr<IFunction> func = nullptr;
1031  std::string func_name;
1032  if(eltwise_op == EltwiseOperation::Add)
1033  {
1034  std::tie(func, func_name) = create_named_function<typename EltwiseFunctions::Addition>(
1035  std::string("ArithmeticAddition"),
1036  input1, input2, output, convert_policy, act_info);
1037  }
1038  else if(eltwise_op == EltwiseOperation::Sub)
1039  {
1040  std::tie(func, func_name) = create_named_function<typename EltwiseFunctions::Subtraction>(
1041  std::string("ArithmeticSubtraction"),
1042  input1, input2, output, convert_policy, act_info);
1043  }
1044  else if(eltwise_op == EltwiseOperation::Mul)
1045  {
1046  std::tie(func, func_name) = create_named_function<typename EltwiseFunctions::Multiplication>(
1047  std::string("PixelWiseMultiplication"),
1048  input1, input2, output, 1.f, convert_policy, node.rounding_policy(), act_info);
1049  }
1050  else if(eltwise_op == EltwiseOperation::Max)
1051  {
1052  std::tie(func, func_name) = create_named_function<typename EltwiseFunctions::Maximum>(
1053  std::string("ElementwiseMaximum"),
1054  input1, input2, output, act_info);
1055  }
1056  else if(eltwise_op == EltwiseOperation::Div)
1057  {
1058  std::tie(func, func_name) = create_named_function<typename EltwiseFunctions::Division>(
1059  std::string("ArithmeticDivision"),
1060  input1, input2, output, act_info);
1061  }
1062  else
1063  {
1064  ARM_COMPUTE_ERROR("Unsupported element-wise operation!");
1065  }
1066 
1067  // Log info
1068  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1069  << node.name()
1070  << " Type: " << node.type()
1071  << " Target: " << TargetInfo::TargetType
1072  << " Operation: " << func_name
1073  << " Data Type: " << input1->info()->data_type()
1074  << " Shape: " << input1->info()->tensor_shape()
1075  << std::endl);
1076 
1077  return std::move(func);
1078 }
EltwiseOperation
Supported Element-wise operations.
Definition: Types.h:109
#define ARM_COMPUTE_ERROR(msg)
Print the given message then throw an std::runtime_error.
Definition: Error.h:352
TensorType
Memory type.
Definition: Types.h:38
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
Definition: Error.h:466
#define ARM_COMPUTE_LOG_GRAPH_INFO(x)
Definition: Logger.h:54
ConvertPolicy
Policy to handle integer overflow.
Definition: Types.h:404

◆ create_flatten_layer()

std::unique_ptr<IFunction> arm_compute::graph::backends::detail::create_flatten_layer ( FlattenLayerNode node)

Create a backend flatten layer function.

Template Parameters
FlattenLayerFunction — Backend flatten function
TargetInfo — Target-specific information
Parameters
[in] node — Node to create the backend function for
Returns
Backend flatten layer function

Definition at line 1138 of file FunctionHelpers.h.

References ARM_COMPUTE_ERROR_ON, ARM_COMPUTE_LOG_GRAPH_INFO, and arm_compute::test::validation::input.

1139 {
1140  validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1141 
1142  // Extract IO and info
1143  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1144  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1145 
1146  ARM_COMPUTE_ERROR_ON(input == nullptr);
1147  ARM_COMPUTE_ERROR_ON(output == nullptr);
1148 
1149  // Create and configure function
1150  auto func = std::make_unique<FlattenLayerFunction>();
1151  func->configure(input, output);
1152 
1153  // Log info
1154  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1155  << node.name()
1156  << " Type: " << node.type()
1157  << " Target: " << TargetInfo::TargetType
1158  << " Data Type: " << input->info()->data_type()
1159  << " Input shape: " << input->info()->tensor_shape()
1160  << " Output shape: " << output->info()->tensor_shape()
1161  << std::endl);
1162 
1163  return std::move(func);
1164 }
TensorType
Memory type.
Definition: Types.h:38
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
Definition: Error.h:466
#define ARM_COMPUTE_LOG_GRAPH_INFO(x)
Definition: Logger.h:54

◆ create_fully_connected_layer()

std::unique_ptr<IFunction> arm_compute::graph::backends::detail::create_fully_connected_layer ( FullyConnectedLayerNode node,
GraphContext ctx 
)

Create a backend fully connected layer function.

Template Parameters
FullyConnectedLayerFunction — Backend fully-connected function
TargetInfo — Target-specific information
Parameters
[in] node — Node to create the backend function for
[in] ctx — Graph context
Returns
Backend fully connected layer function

Definition at line 1177 of file FunctionHelpers.h.

References ARM_COMPUTE_ERROR_ON, ARM_COMPUTE_LOG_GRAPH_INFO, FullyConnectedLayerInfo::enable_fast_math, arm_compute::graph::Enabled, arm_compute::graph::backends::get_memory_manager(), arm_compute::graph::backends::get_weights_manager(), arm_compute::test::validation::input, and arm_compute::is_data_type_quantized_asymmetric().

1178 {
1179  validate_node<TargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);
1180 
1181  // Extract IO and info
1182  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1183  typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
1184  typename TargetInfo::TensorType *biases = get_backing_tensor<TargetInfo>(node.input(2));
1185  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1186  FullyConnectedLayerInfo fc_info = node.info();
1187  fc_info.enable_fast_math = (node.fast_math_hint() == FastMathHint::Enabled);
1188 
1189  ARM_COMPUTE_ERROR_ON(input == nullptr);
1190  ARM_COMPUTE_ERROR_ON(weights == nullptr);
1191  ARM_COMPUTE_ERROR_ON(output == nullptr);
1192 
1193  // Create and configure function
1194  auto wm = get_weights_manager(ctx, TargetInfo::TargetType);
1195  auto mm = get_memory_manager(ctx, TargetInfo::TargetType);
1196  auto func = std::make_unique<FullyConnectedLayerFunction>(mm, wm.get());
1197  func->configure(input, weights, biases, output, fc_info);
1198 
1199  const bool is_quantized = is_data_type_quantized_asymmetric(input->info()->data_type());
1200 
1201  // Log info
1202  std::ostringstream qss;
1203  if(is_quantized)
1204  {
1205  qss << " Input QuantInfo: " << input->info()->quantization_info()
1206  << " Weights QuantInfo: " << weights->info()->quantization_info()
1207  << " Output QuantInfo: " << output->info()->quantization_info();
1208  }
1209  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1210  << node.name()
1211  << " Type: " << node.type()
1212  << " Target: " << TargetInfo::TargetType
1213  << " Data Type: " << input->info()->data_type()
1214  << qss.str()
1215  << " Input shape: " << input->info()->tensor_shape()
1216  << " Weights shape: " << weights->info()->tensor_shape()
1217  << " Output shape: " << output->info()->tensor_shape()
1218  << std::endl);
1219 
1220  return std::move(func);
1221 }
TensorType
Memory type.
Definition: Types.h:38
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
Definition: Error.h:466
#define ARM_COMPUTE_LOG_GRAPH_INFO(x)
Definition: Logger.h:54
bool is_data_type_quantized_asymmetric(DataType dt)
Check if a given data type is of asymmetric quantized type.
Definition: Utils.h:1052
std::shared_ptr< IMemoryManager > get_memory_manager(GraphContext &ctx, Target target)
Returns the memory manager for a given target.
Definition: Utils.h:89
std::shared_ptr< IWeightsManager > get_weights_manager(GraphContext &ctx, Target target)
Returns the weights manager for a given target.
Definition: Utils.h:102

◆ create_fused_convolution_batch_normalization_layer()

std::unique_ptr<IFunction> arm_compute::graph::backends::detail::create_fused_convolution_batch_normalization_layer ( FusedConvolutionBatchNormalizationNode node,
GraphContext ctx 
)

Create a backend batch normalization layer function.

Template Parameters
BatchNormalizationLayerFunction — Backend batch normalization function
TargetInfo — Target-specific information
Parameters
[in] node — Node to create the backend function for
[in] ctx — Graph context
Returns
Backend batch normalization layer function

Definition at line 222 of file FunctionHelpers.h.

References ActivationLayerInfo::activation(), ARM_COMPUTE_LOG_GRAPH_INFO, arm_compute::test::validation::conv_info, arm_compute::graph::Enabled, ActivationLayerInfo::enabled(), arm_compute::quantization::epsilon, arm_compute::graph::backends::get_memory_manager(), arm_compute::test::validation::input, arm_compute::test::validation::num_groups, and arm_compute::to_string().

223 {
224  validate_node<TargetInfo>(node, 7 /* expected inputs */, 1 /* expected outputs */);
225 
226  // Extract IO and info
227  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
228  typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
229  typename TargetInfo::TensorType *biases = get_backing_tensor<TargetInfo>(node.input(2));
230  typename TargetInfo::TensorType *mean = get_backing_tensor<TargetInfo>(node.input(3));
231  typename TargetInfo::TensorType *var = get_backing_tensor<TargetInfo>(node.input(4));
232  typename TargetInfo::TensorType *beta = get_backing_tensor<TargetInfo>(node.input(5));
233  typename TargetInfo::TensorType *gamma = get_backing_tensor<TargetInfo>(node.input(6));
234 
235  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
236 
237  const PadStrideInfo conv_info = node.convolution_info();
238  const unsigned int num_groups = node.num_groups();
239  const bool fast_math = node.fast_math_hint() == FastMathHint::Enabled;
240  const ActivationLayerInfo fused_act = node.fused_activation();
241  const float epsilon = node.epsilon();
242 
243  // Create and configure function (we assume that functions have been validated before creation)
244  std::shared_ptr<IMemoryManager> mm = get_memory_manager(ctx, TargetInfo::TargetType);
245  std::unique_ptr<IFunction> func;
246  std::string func_name;
247 
248  using FType = FusedConvolutionBatchNormalizationFunction<TargetInfo, FusedLayerTypes>;
249 
250  // Create and configure function
251  std::tie(func, func_name) = create_named_memory_managed_function<FType>(
252  std::string("FusedConvolutionBatchNormalizationLayer"), mm, input, weights, biases, output, mean, var, beta, gamma, epsilon, conv_info, num_groups, fast_math, fused_act);
253 
254  // Log info
255  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
256  << node.name()
257  << " Type: " << node.type()
258  << " Target: " << TargetInfo::TargetType
259  << " Data Type: " << input->info()->data_type()
260  << " Input shape: " << input->info()->tensor_shape()
261  << " Weights shape: " << weights->info()->tensor_shape()
262  << " Output shape: " << output->info()->tensor_shape()
263  << (fused_act.enabled() ? " " + to_string(fused_act.activation()) : "")
264  << std::endl);
265  return std::move(func);
266 }
TensorType
Memory type.
Definition: Types.h:38
std::string to_string(const CLBuildOptions &cl_build_opts)
Definition: Utils.h:51
#define ARM_COMPUTE_LOG_GRAPH_INFO(x)
Definition: Logger.h:54
const unsigned int num_groups
Definition: Im2Col.cpp:153
std::shared_ptr< IMemoryManager > get_memory_manager(GraphContext &ctx, Target target)
Returns the memory manager for a given target.
Definition: Utils.h:89

◆ create_fused_convolution_batch_normalization_with_post_op()

std::unique_ptr<IFunction> arm_compute::graph::backends::detail::create_fused_convolution_batch_normalization_with_post_op ( FusedConvolutionBatchNormalizationWithPostOpsNode node,
GraphContext ctx 
)

Create a backend convolution batch normalization layer function with post operator.

Template Parameters
FusedLayerTypesBackend convolution functions
TargetInfoTarget-specific information
Parameters
[in]nodeNode to create the backend function for
[in]ctxGraph context
Returns
Backend fused convolution with batch normalization layer function

Definition at line 649 of file FunctionHelpers.h.

References arm_compute::experimental::Activation, ARM_COMPUTE_ERROR, ARM_COMPUTE_LOG_GRAPH_INFO, arm_compute::test::validation::conv_info, arm_compute::graph::Enabled, arm_compute::quantization::epsilon, arm_compute::graph::backends::get_memory_manager(), arm_compute::test::validation::input, arm_compute::test::validation::num_groups, and arm_compute::test::validation::post_ops.

650 {
651  validate_node<TargetInfo>(node, 8 /* expected inputs */, 1 /* expected outputs */);
652 
653  // Extract IO and info
654  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
655  typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
656  typename TargetInfo::TensorType *biases = get_backing_tensor<TargetInfo>(node.input(2));
657  typename TargetInfo::TensorType *mean = get_backing_tensor<TargetInfo>(node.input(3));
658  typename TargetInfo::TensorType *var = get_backing_tensor<TargetInfo>(node.input(4));
659  typename TargetInfo::TensorType *beta = get_backing_tensor<TargetInfo>(node.input(5));
660  typename TargetInfo::TensorType *gamma = get_backing_tensor<TargetInfo>(node.input(6));
661 
662  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
663 
664  const PadStrideInfo conv_info = node.convolution_info();
665  const unsigned int num_groups = node.num_groups();
666  const bool fast_math = node.fast_math_hint() == FastMathHint::Enabled;
667  const float epsilon = node.epsilon();
668 
669  experimental::PostOpList<typename TargetInfo::TensorType *> post_ops;
670 
671  auto &post_op_info_list = node.post_op_info_list();
672  for(const auto &post_op_info : post_op_info_list)
673  {
674  switch(post_op_info->type())
675  {
 676  case PostOpType::Activation:
 677  {
678  const auto act_info = utils::cast::polymorphic_downcast<const ConvPostOpInfoActivation *>(post_op_info.get());
679  post_ops.template push_back_op<experimental::PostOpAct<typename TargetInfo::TensorType *>>(act_info->_act);
680  break;
681  }
682  case PostOpType::Eltwise_Add:
683  {
684  typename TargetInfo::TensorType *add_input = get_backing_tensor<TargetInfo>(node.input(3));
685  const auto eltwise_info = utils::cast::polymorphic_downcast<const ConvPostOpInfoEltwiseAdd *>(post_op_info.get());
686  post_ops.template push_back_op<experimental::PostOpEltwiseAdd<typename TargetInfo::TensorType *>>(add_input, eltwise_info->_prev_op_dst_pos, eltwise_info->_policy);
687  break;
688  }
689  default:
690  {
691  ARM_COMPUTE_ERROR("Unsupported PostOpType");
692  }
693  }
694  }
695 
696  // Create and configure function (we assume that functions have been validated before creation)
697  std::shared_ptr<IMemoryManager> mm = get_memory_manager(ctx, TargetInfo::TargetType);
698  std::unique_ptr<IFunction> func;
699  std::string func_name;
700 
701  using FType = FusedConvolutionBatchNormalizationWithPostOpsFunction<TargetInfo, FusedLayerTypes>;
702 
703  // Create and configure function
704  std::tie(func, func_name) = create_named_memory_managed_function<FType>(
705  std::string("FusedConvolutionBatchNormalizationLayerWithPostOpsLayer"), mm, input, weights, biases, output, mean, var, beta, gamma, epsilon, conv_info, num_groups, fast_math, post_ops);
706 
707  // Log info
708  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
709  << node.name()
710  << " Type: " << node.type()
711  << " Target: " << TargetInfo::TargetType
712  << " Data Type: " << input->info()->data_type()
713  << " Input shape: " << input->info()->tensor_shape()
714  << " Weights shape: " << weights->info()->tensor_shape()
715  << " Output shape: " << output->info()->tensor_shape()
716  << " Post Ops:" << post_ops
717  << std::endl);
718  return std::move(func);
719 }
experimental::PostOpList< ITensorInfo * > post_ops
#define ARM_COMPUTE_ERROR(msg)
Print the given message then throw an std::runtime_error.
Definition: Error.h:352
TensorType
Memory type.
Definition: Types.h:38
arm_compute::ActivationLayerInfo::ActivationFunction Activation
Constant TensorID specifying an equivalent of null tensor.
Definition: Types.h:73
#define ARM_COMPUTE_LOG_GRAPH_INFO(x)
Definition: Logger.h:54
const unsigned int num_groups
Definition: Im2Col.cpp:153
std::shared_ptr< IMemoryManager > get_memory_manager(GraphContext &ctx, Target target)
Returns the memory manager for a given target.
Definition: Utils.h:89

◆ create_fused_convolution_with_post_op()

std::unique_ptr<IFunction> arm_compute::graph::backends::detail::create_fused_convolution_with_post_op ( FusedConvolutionWithPostOpNode node,
GraphContext ctx 
)

Create a backend convolution layer function with post operator.

Template Parameters
ConvolutionLayerFunctionsBackend convolution functions
TargetInfoTarget-specific information
Parameters
[in]nodeNode to create the backend function for
[in]ctxGraph context
Returns
Backend convolution layer function

Definition at line 555 of file FunctionHelpers.h.

References arm_compute::experimental::Activation, ARM_COMPUTE_ERROR, ARM_COMPUTE_LOG_GRAPH_INFO, arm_compute::test::validation::conv_info, arm_compute::graph::backends::get_memory_manager(), arm_compute::test::validation::input, arm_compute::is_data_type_quantized_asymmetric(), arm_compute::test::validation::num_groups, arm_compute::test::validation::post_ops, arm_compute::S32, arm_compute::to_string(), and arm_compute::utils::cast::U.

556 {
557  validate_node<TargetInfo>(node, 4 /* expected inputs */, 1 /* expected outputs */);
558 
559  // Extract IO and info
560  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
561  typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
562  typename TargetInfo::TensorType *biases = get_backing_tensor<TargetInfo>(node.input(2));
563  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
564 
565  const bool is_quantized = is_data_type_quantized_asymmetric(input->info()->data_type());
566 
567  if(is_quantized)
568  {
569  biases->info()->set_data_type(DataType::S32);
570  }
571 
572  const PadStrideInfo conv_info = node.convolution_info();
573  const unsigned int num_groups = node.num_groups();
574  const ActivationLayerInfo fused_act = node.fused_activation();
575 
576  experimental::PostOpList<typename TargetInfo::TensorType *> post_ops;
577 
578  auto &post_op_info_list = node.post_op_info_list();
579  for(const auto &post_op_info : post_op_info_list)
580  {
581  switch(post_op_info->type())
582  {
 583  case PostOpType::Activation:
 584  {
585  const auto act_info = utils::cast::polymorphic_downcast<const ConvPostOpInfoActivation *>(post_op_info.get());
586  post_ops.template push_back_op<experimental::PostOpAct<typename TargetInfo::TensorType *>>(act_info->_act);
587  break;
588  }
589  case PostOpType::Eltwise_Add:
590  {
591  typename TargetInfo::TensorType *add_input = get_backing_tensor<TargetInfo>(node.input(3));
592  const auto eltwise_info = utils::cast::polymorphic_downcast<const ConvPostOpInfoEltwiseAdd *>(post_op_info.get());
593  post_ops.template push_back_op<experimental::PostOpEltwiseAdd<typename TargetInfo::TensorType *>>(add_input, eltwise_info->_prev_op_dst_pos, eltwise_info->_policy);
594  break;
595  }
596  default:
597  {
598  ARM_COMPUTE_ERROR("Unsupported PostOpType");
599  }
600  }
601  }
602 
603  // Create and configure function (we assume that functions have been validated before creation)
604  std::shared_ptr<IMemoryManager> mm = get_memory_manager(ctx, TargetInfo::TargetType);
605  std::unique_ptr<IFunction> func;
606  std::string func_name;
607 
608  // Fuse convolution with post ops is only supported for conv1x1, which is only implemented as gemmconv2d
609  std::tie(func, func_name) = create_named_memory_managed_function<typename ConvolutionLayerFunctions::GEMMConvolutionLayer>(
610  std::string("GEMMConvolutionLayer"), mm,
611  input, weights, biases, output, conv_info,
612  WeightsInfo(), Size2D(1U, 1U), fused_act, num_groups, post_ops);
613 
614  // Log info
615  std::ostringstream qss;
616  if(is_quantized)
617  {
618  qss << " Input QuantInfo: " << input->info()->quantization_info()
619  << " Weights QuantInfo: " << weights->info()->quantization_info()
620  << " Output QuantInfo: " << output->info()->quantization_info();
621  }
622  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
623  << node.name()
624  << " Type: " << func_name
625  << " Target: " << TargetInfo::TargetType
626  << " Data Type: " << input->info()->data_type()
627  << " Groups: " << num_groups
628  << " Input shape: " << input->info()->tensor_shape()
629  << " Weights shape: " << weights->info()->tensor_shape()
630  << " Output shape: " << output->info()->tensor_shape()
631  << qss.str()
632  << (fused_act.enabled() ? " " + to_string(fused_act.activation()) : "")
633  << " Post ops" << post_ops
634  << std::endl);
635  return std::move(func);
636 }
experimental::PostOpList< ITensorInfo * > post_ops
#define ARM_COMPUTE_ERROR(msg)
Print the given message then throw an std::runtime_error.
Definition: Error.h:352
TensorType
Memory type.
Definition: Types.h:38
std::string to_string(const CLBuildOptions &cl_build_opts)
Definition: Utils.h:51
arm_compute::ActivationLayerInfo::ActivationFunction Activation
Constant TensorID specifying an equivalent of null tensor.
Definition: Types.h:73
#define ARM_COMPUTE_LOG_GRAPH_INFO(x)
Definition: Logger.h:54
const unsigned int num_groups
Definition: Im2Col.cpp:153
bool is_data_type_quantized_asymmetric(DataType dt)
Check if a given data type is of asymmetric quantized type.
Definition: Utils.h:1052
std::shared_ptr< IMemoryManager > get_memory_manager(GraphContext &ctx, Target target)
Returns the memory manager for a given target.
Definition: Utils.h:89

◆ create_fused_depthwise_convolution_batch_normalization_layer()

std::unique_ptr<IFunction> arm_compute::graph::backends::detail::create_fused_depthwise_convolution_batch_normalization_layer ( FusedDepthwiseConvolutionBatchNormalizationNode node,
GraphContext ctx 
)

Create a backend fused depthwise convolution batch normalization layer function.

Template Parameters
FusedLayerTypesFused layer types
TargetInfoTarget-specific information
Parameters
[in]nodeNode to create the backend function for
[in]ctxGraph context
Returns
Backend fused depthwise convolution batch normalization layer function

Definition at line 279 of file FunctionHelpers.h.

References ActivationLayerInfo::activation(), ARM_COMPUTE_LOG_GRAPH_INFO, arm_compute::test::validation::conv_info, ActivationLayerInfo::enabled(), arm_compute::quantization::epsilon, arm_compute::graph::backends::get_memory_manager(), arm_compute::test::validation::input, and arm_compute::to_string().

280 {
281  validate_node<TargetInfo>(node, 7 /* expected inputs */, 1 /* expected outputs */);
282 
283  // Extract IO and info
284  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
285  typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
286  typename TargetInfo::TensorType *biases = get_backing_tensor<TargetInfo>(node.input(2));
287  typename TargetInfo::TensorType *mean = get_backing_tensor<TargetInfo>(node.input(3));
288  typename TargetInfo::TensorType *var = get_backing_tensor<TargetInfo>(node.input(4));
289  typename TargetInfo::TensorType *beta = get_backing_tensor<TargetInfo>(node.input(5));
290  typename TargetInfo::TensorType *gamma = get_backing_tensor<TargetInfo>(node.input(6));
291 
292  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
293 
294  const PadStrideInfo conv_info = node.convolution_info();
295  const unsigned int depth_multiplier = node.depth_multiplier();
296  const ActivationLayerInfo fused_act = node.fused_activation();
297  const float epsilon = node.epsilon();
298 
299  // Create and configure function (we assume that functions have been validated before creation)
300  std::shared_ptr<IMemoryManager> mm = get_memory_manager(ctx, TargetInfo::TargetType);
301  std::unique_ptr<IFunction> func;
302  std::string func_name;
303 
304  using FType = FusedDepthwiseConvolutionBatchNormalizationFunction<TargetInfo, FusedLayerTypes>;
305 
306  // Create and configure function
307  std::tie(func, func_name) = create_named_memory_managed_function<FType>(
308  std::string("FusedDepthwiseConvolutionBatchNormalizationLayer"), mm, input, weights, biases, output, mean, var, beta, gamma, epsilon, conv_info, depth_multiplier, fused_act);
309 
310  // Log info
311  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
312  << node.name()
313  << " Type: " << node.type()
314  << " Target: " << TargetInfo::TargetType
315  << " Data Type: " << input->info()->data_type()
316  << " Input shape: " << input->info()->tensor_shape()
317  << " Weights shape: " << weights->info()->tensor_shape()
318  << " Output shape: " << output->info()->tensor_shape()
319  << (fused_act.enabled() ? " " + to_string(fused_act.activation()) : "")
320  << std::endl);
321  return std::move(func);
322 }
TensorType
Memory type.
Definition: Types.h:38
std::string to_string(const CLBuildOptions &cl_build_opts)
Definition: Utils.h:51
#define ARM_COMPUTE_LOG_GRAPH_INFO(x)
Definition: Logger.h:54
std::shared_ptr< IMemoryManager > get_memory_manager(GraphContext &ctx, Target target)
Returns the memory manager for a given target.
Definition: Utils.h:89

◆ create_generate_proposals_layer()

std::unique_ptr<IFunction> arm_compute::graph::backends::detail::create_generate_proposals_layer ( GenerateProposalsLayerNode node,
GraphContext ctx 
)

Create a backend generate proposals layer function.

Template Parameters
GenerateProposalsLayerFunctionBackend generate proposals function
TargetInfoTarget-specific information
Parameters
[in]nodeNode to create the backend function for
[in]ctxGraph context
Returns
Backend generate proposals layer function

Definition at line 1234 of file FunctionHelpers.h.

References ARM_COMPUTE_ERROR_ON, ARM_COMPUTE_LOG_GRAPH_INFO, arm_compute::graph::backends::get_memory_manager(), and arm_compute::test::validation::info.

1235 {
1236  validate_node<TargetInfo>(node, 3 /* expected inputs */, 3 /* expected outputs */);
1237 
1238  // Extract IO and info
1239  typename TargetInfo::TensorType *scores = get_backing_tensor<TargetInfo>(node.input(0));
1240  typename TargetInfo::TensorType *deltas = get_backing_tensor<TargetInfo>(node.input(1));
1241  typename TargetInfo::TensorType *anchors = get_backing_tensor<TargetInfo>(node.input(2));
1242  typename TargetInfo::TensorType *proposals = get_backing_tensor<TargetInfo>(node.output(0));
1243  typename TargetInfo::TensorType *scores_out = get_backing_tensor<TargetInfo>(node.output(1));
1244  typename TargetInfo::TensorType *num_valid_proposals = get_backing_tensor<TargetInfo>(node.output(2));
1245  const GenerateProposalsInfo info = node.info();
1246 
1247  ARM_COMPUTE_ERROR_ON(scores == nullptr);
1248  ARM_COMPUTE_ERROR_ON(deltas == nullptr);
1249  ARM_COMPUTE_ERROR_ON(anchors == nullptr);
1250  ARM_COMPUTE_ERROR_ON(proposals == nullptr);
1251  ARM_COMPUTE_ERROR_ON(scores_out == nullptr);
1252 
1253  // Create and configure function
1254  auto func = std::make_unique<GenerateProposalsLayerFunction>(get_memory_manager(ctx, TargetInfo::TargetType));
1255  func->configure(scores, deltas, anchors, proposals, scores_out, num_valid_proposals, info);
1256 
1257  // Log info
1258  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.type()
1259  << " Target " << TargetInfo::TargetType
1260  << " Data Type: " << scores->info()->data_type()
1261  << " Scores shape: " << scores->info()->tensor_shape()
1262  << " Deltas shape: " << deltas->info()->tensor_shape()
1263  << " Anchors shape: " << anchors->info()->tensor_shape()
1264  << " Proposals shape: " << proposals->info()->tensor_shape()
1265  << " Num valid proposals shape: " << num_valid_proposals->info()->tensor_shape()
1266  << " Scores Out shape: " << scores_out->info()->tensor_shape()
1267  << std::endl);
1268 
1269  return std::move(func);
1270 }
TensorType
Memory type.
Definition: Types.h:38
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
Definition: Error.h:466
#define ARM_COMPUTE_LOG_GRAPH_INFO(x)
Definition: Logger.h:54
ScaleKernelInfo info(interpolation_policy, default_border_mode, PixelValue(), sampling_policy, false)
std::shared_ptr< IMemoryManager > get_memory_manager(GraphContext &ctx, Target target)
Returns the memory manager for a given target.
Definition: Utils.h:89

◆ create_l2_normalize_layer()

std::unique_ptr<IFunction> arm_compute::graph::backends::detail::create_l2_normalize_layer ( L2NormalizeLayerNode node,
GraphContext ctx 
)

Create a backend l2 normalization layer function.

Template Parameters
NormalizationLayerFunctionBackend normalization function
TargetInfoTarget-specific information
Parameters
[in]nodeNode to create the backend function for
[in]ctxGraph context
Returns
Backend normalization layer function

Definition at line 1283 of file FunctionHelpers.h.

References ARM_COMPUTE_ERROR_ON, ARM_COMPUTE_LOG_GRAPH_INFO, arm_compute::quantization::epsilon, arm_compute::graph::backends::get_memory_manager(), and arm_compute::test::validation::input.

1284 {
1285  validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1286 
1287  // Extract IO and info
1288  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1289  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1290  int axis = node.axis();
1291  float epsilon = node.epsilon();
1292 
1293  ARM_COMPUTE_ERROR_ON(input == nullptr);
1294  ARM_COMPUTE_ERROR_ON(output == nullptr);
1295 
1296  // Create and configure function
1297  auto mm = get_memory_manager(ctx, TargetInfo::TargetType);
1298  auto func = std::make_unique<L2NormalizeLayerFunction>(mm);
1299  func->configure(input, output, axis, epsilon);
1300 
1301  // Log info
1302  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1303  << node.name()
1304  << " Type: " << node.type()
1305  << " Target: " << TargetInfo::TargetType
1306  << " Data Type: " << input->info()->data_type()
1307  << " Input shape: " << input->info()->tensor_shape()
1308  << " Output shape: " << output->info()->tensor_shape()
1309  << " Axis: " << axis
1310  << " Epsilon: " << epsilon
1311  << std::endl);
1312 
1313  return std::move(func);
1314 }
TensorType
Memory type.
Definition: Types.h:38
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
Definition: Error.h:466
#define ARM_COMPUTE_LOG_GRAPH_INFO(x)
Definition: Logger.h:54
std::shared_ptr< IMemoryManager > get_memory_manager(GraphContext &ctx, Target target)
Returns the memory manager for a given target.
Definition: Utils.h:89

◆ create_normalization_layer()

std::unique_ptr<IFunction> arm_compute::graph::backends::detail::create_normalization_layer ( NormalizationLayerNode node,
GraphContext ctx 
)

Create a backend normalization layer function.

Template Parameters
NormalizationLayerFunctionBackend normalization function
TargetInfoTarget-specific information
Parameters
[in]nodeNode to create the backend function for
[in]ctxGraph context
Returns
Backend normalization layer function

Definition at line 1327 of file FunctionHelpers.h.

References ARM_COMPUTE_ERROR_ON, ARM_COMPUTE_LOG_GRAPH_INFO, ARM_COMPUTE_UNUSED, and arm_compute::test::validation::input.

1328 {
1329  ARM_COMPUTE_UNUSED(ctx);
1330 
1331  validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1332 
1333  // Extract IO and info
1334  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1335  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1336  const NormalizationLayerInfo norm_info = node.normalization_info();
1337  ARM_COMPUTE_ERROR_ON(input == nullptr);
1338  ARM_COMPUTE_ERROR_ON(output == nullptr);
1339 
1340  // Create and configure function
1341  auto func = std::make_unique<NormalizationLayerFunction>();
1342  func->configure(input, output, norm_info);
1343 
1344  // Log info
1345  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1346  << node.name()
1347  << " Type: " << node.type()
1348  << " Target: " << TargetInfo::TargetType
1349  << " Data Type: " << input->info()->data_type()
1350  << " Input shape: " << input->info()->tensor_shape()
1351  << " Output shape: " << output->info()->tensor_shape()
1352  << " Normalization info: " << norm_info.type()
1353  << std::endl);
1354 
1355  return std::move(func);
1356 }
TensorType
Memory type.
Definition: Types.h:38
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
Definition: Error.h:466
#define ARM_COMPUTE_LOG_GRAPH_INFO(x)
Definition: Logger.h:54
#define ARM_COMPUTE_UNUSED(...)
To avoid unused variables warnings.
Definition: Error.h:152

◆ create_normalization_layer< NENormalizationLayer, NETargetInfo >()

Definition at line 93 of file NEFunctionFactory.cpp.

References ARM_COMPUTE_ERROR_ON, ARM_COMPUTE_LOG_GRAPH_INFO, arm_compute::graph::backends::get_memory_manager(), and arm_compute::test::validation::input.

Referenced by NEFunctionFactory::create().

94 {
95  validate_node<NETargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
96 
97  // Extract IO and info
98  NETargetInfo::TensorType *input = get_backing_tensor<NETargetInfo>(node.input(0));
99  NETargetInfo::TensorType *output = get_backing_tensor<NETargetInfo>(node.output(0));
100  const NormalizationLayerInfo norm_info = node.normalization_info();
101  ARM_COMPUTE_ERROR_ON(input == nullptr);
102  ARM_COMPUTE_ERROR_ON(output == nullptr);
103 
104  // Create and configure function
105  auto func = std::make_unique<NENormalizationLayer>(get_memory_manager(ctx, NETargetInfo::TargetType));
106  func->configure(input, output, norm_info);
107 
108  // Log info
109  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
110  << node.name()
111  << " Type: " << node.type()
112  << " Target: " << NETargetInfo::TargetType
113  << " Data Type: " << input->info()->data_type()
114  << " Input shape: " << input->info()->tensor_shape()
115  << " Output shape: " << output->info()->tensor_shape()
116  << " Normalization info: " << norm_info.type()
117  << std::endl);
118 
119  return std::move(func);
120 }
TensorType
Memory type.
Definition: Types.h:38
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
Definition: Error.h:466
#define ARM_COMPUTE_LOG_GRAPH_INFO(x)
Definition: Logger.h:54
std::shared_ptr< IMemoryManager > get_memory_manager(GraphContext &ctx, Target target)
Returns the memory manager for a given target.
Definition: Utils.h:89

◆ create_normalize_planar_yuv_layer()

std::unique_ptr<IFunction> arm_compute::graph::backends::detail::create_normalize_planar_yuv_layer ( NormalizePlanarYUVLayerNode node)

Create a backend normalize planar YUV layer function.

Template Parameters
NormalizePlanarYUVLayerFunctionBackend normalize planar YUV function
TargetInfoTarget-specific information
Parameters
[in]nodeNode to create the backend function for
Returns
Backend normalize planar YUV layer function

Definition at line 1368 of file FunctionHelpers.h.

References ARM_COMPUTE_ERROR_ON, ARM_COMPUTE_LOG_GRAPH_INFO, and arm_compute::test::validation::input.

1369 {
1370  validate_node<TargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);
1371 
1372  // Extract IO and info
1373  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1374  typename TargetInfo::TensorType *mean = get_backing_tensor<TargetInfo>(node.input(1));
1375  typename TargetInfo::TensorType *std = get_backing_tensor<TargetInfo>(node.input(2));
1376  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1377  ARM_COMPUTE_ERROR_ON(input == nullptr);
1378  ARM_COMPUTE_ERROR_ON(mean == nullptr);
1379  ARM_COMPUTE_ERROR_ON(std == nullptr);
1380  ARM_COMPUTE_ERROR_ON(output == nullptr);
1381 
1382  // Create and configure function
1383  auto func = std::make_unique<NormalizePlanarYUVLayerFunction>();
1384  func->configure(input, output, mean, std);
1385 
1386  // Log info
1387  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1388  << node.name()
1389  << " Type: " << node.type()
1390  << " Target: " << TargetInfo::TargetType
1391  << " Data Type: " << input->info()->data_type()
1392  << " Shape: " << input->info()->tensor_shape()
1393  << std::endl);
1394 
1395  return std::move(func);
1396 }
TensorType
Memory type.
Definition: Types.h:38
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
Definition: Error.h:466
#define ARM_COMPUTE_LOG_GRAPH_INFO(x)
Definition: Logger.h:54

◆ create_pad_layer()

std::unique_ptr<IFunction> arm_compute::graph::backends::detail::create_pad_layer ( PadLayerNode node)

Create a backend pad layer function.

Template Parameters
PadLayerFunctionBackend pad function
TargetInfoTarget-specific information
Parameters
[in]nodeNode to create the backend function for
Returns
Backend pad layer function

Definition at line 1408 of file FunctionHelpers.h.

References ARM_COMPUTE_ERROR_ON, ARM_COMPUTE_LOG_GRAPH_INFO, and arm_compute::test::validation::input.

1409 {
1410  validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1411 
1412  // Extract IO and info
1413  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1414  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1415  const PaddingList &padding = node.padding();
1416  const PixelValue pad_value = node.pad_value();
1417  ARM_COMPUTE_ERROR_ON(input == nullptr);
1418  ARM_COMPUTE_ERROR_ON(output == nullptr);
1419 
1420  // Create and configure function
1421  auto func = std::make_unique<PadLayerFunction>();
1422  func->configure(input, output, padding, pad_value);
1423 
1424  // Log info
1425  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1426  << node.name()
1427  << " Type: " << node.type()
1428  << " Target: " << TargetInfo::TargetType
1429  << " Data Type: " << input->info()->data_type()
1430  << " Input shape: " << input->info()->tensor_shape()
1431  << " Output shape: " << output->info()->tensor_shape()
1432  << std::endl);
1433 
1434  return std::move(func);
1435 }
std::vector< PaddingInfo > PaddingList
List of padding information.
Definition: Types.h:453
TensorType
Memory type.
Definition: Types.h:38
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
Definition: Error.h:466
#define ARM_COMPUTE_LOG_GRAPH_INFO(x)
Definition: Logger.h:54

◆ create_permute_layer()

std::unique_ptr<IFunction> arm_compute::graph::backends::detail::create_permute_layer ( PermuteLayerNode node)

Create a backend permute layer function.

Template Parameters
PermuteLayerFunctionBackend permute function
TargetInfoTarget-specific information
Parameters
[in]nodeNode to create the backend function for
Returns
Backend permute layer function

Definition at line 1447 of file FunctionHelpers.h.

References ARM_COMPUTE_ERROR_ON, ARM_COMPUTE_LOG_GRAPH_INFO, and arm_compute::test::validation::input.

1448 {
1449  validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1450 
1451  // Extract IO and info
1452  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1453  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1454  const PermutationVector &perm = node.permutation_vector();
1455  ARM_COMPUTE_ERROR_ON(input == nullptr);
1456  ARM_COMPUTE_ERROR_ON(output == nullptr);
1457 
1458  // Create and configure function
1459  auto func = std::make_unique<PermuteLayerFunction>();
1460  func->configure(input, output, perm);
1461 
1462  // Log info
1463  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1464  << node.name()
1465  << " Type: " << node.type()
1466  << " Target: " << TargetInfo::TargetType
1467  << " Data Type: " << input->info()->data_type()
1468  << " Input shape: " << input->info()->tensor_shape()
1469  << " Output shape: " << output->info()->tensor_shape()
1470  << " Permutation vector: " << perm
1471  << std::endl);
1472 
1473  return std::move(func);
1474 }
TensorType
Memory type.
Definition: Types.h:38
Strides PermutationVector
Permutation vector.
Definition: Types.h:51
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
Definition: Error.h:466
#define ARM_COMPUTE_LOG_GRAPH_INFO(x)
Definition: Logger.h:54

◆ create_pooling_layer()

std::unique_ptr<IFunction> arm_compute::graph::backends::detail::create_pooling_layer ( PoolingLayerNode node)

Create a backend pooling layer function.

Template Parameters
PoolingLayerFunctionBackend pooling function
TargetInfoTarget-specific information
Parameters
[in]nodeNode to create the backend function for
Returns
Backend pooling layer function

Definition at line 1486 of file FunctionHelpers.h.

References ARM_COMPUTE_ERROR_ON, ARM_COMPUTE_LOG_GRAPH_INFO, and arm_compute::test::validation::input.

1487 {
1488  validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1489 
1490  // Extract IO and info
1491  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1492  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1493  const PoolingLayerInfo pool_info = node.pooling_info();
1494  ARM_COMPUTE_ERROR_ON(input == nullptr);
1495  ARM_COMPUTE_ERROR_ON(output == nullptr);
1496 
1497  // Create and configure function
1498  auto func = std::make_unique<PoolingLayerFunction>();
1499  func->configure(input, output, pool_info);
1500 
1501  // Log info
1502  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1503  << node.name()
1504  << " Type: " << node.type()
1505  << " Target: " << TargetInfo::TargetType
1506  << " Data Type: " << input->info()->data_type()
1507  << " Input shape: " << input->info()->tensor_shape()
1508  << " Output shape: " << output->info()->tensor_shape()
1509  << " Pooling info: " << pool_info.pool_type
1510  << std::endl);
1511 
1512  return std::move(func);
1513 }
TensorType
Memory type.
Definition: Types.h:38
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
Definition: Error.h:466
#define ARM_COMPUTE_LOG_GRAPH_INFO(x)
Definition: Logger.h:54

◆ create_prelu_layer()

std::unique_ptr<IFunction> arm_compute::graph::backends::detail::create_prelu_layer ( PReluLayerNode node)

Create a backend PRelu layer function.

Template Parameters
PReluFunctionBackend PRelu function
TargetInfoTarget-specific information
Parameters
[in]nodeNode to create the backend function for
Returns
Backend PRelu layer function

Definition at line 1525 of file FunctionHelpers.h.

References ARM_COMPUTE_ERROR_ON, ARM_COMPUTE_LOG_GRAPH_INFO, and arm_compute::test::validation::input.

1526 {
1527  validate_node<TargetInfo>(node, 2 /* expected inputs */, 1 /* expected outputs */);
1528 
1529  // Extract IO and info
1530  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1531  typename TargetInfo::TensorType *alpha = get_backing_tensor<TargetInfo>(node.input(1));
1532  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1533  ARM_COMPUTE_ERROR_ON(input == nullptr || alpha == nullptr);
1534  ARM_COMPUTE_ERROR_ON(output == nullptr);
1535 
1536  // Create and configure function
1537  auto func = std::make_unique<PReluFunction>();
1538  func->configure(input, alpha, output);
1539 
1540  // Log info
1541  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1542  << node.name()
1543  << " Type: " << node.type()
1544  << " Target: " << TargetInfo::TargetType
1545  << " Data Type: " << input->info()->data_type()
1546  << " Input shape: " << input->info()->tensor_shape()
1547  << " Output shape: " << output->info()->tensor_shape()
1548  << std::endl);
1549 
1550  return std::move(func);
1551 }
TensorType
Memory type.
Definition: Types.h:38
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
Definition: Error.h:466
#define ARM_COMPUTE_LOG_GRAPH_INFO(x)
Definition: Logger.h:54

◆ create_print_layer()

std::unique_ptr<IFunction> arm_compute::graph::backends::detail::create_print_layer ( PrintLayerNode node)

Create a backend print layer function.

Template Parameters
TargetInfoTarget-specific information
Parameters
[in]nodeNode to create the backend function for
Returns
Backend print layer function

Definition at line 1562 of file FunctionHelpers.h.

References ARM_COMPUTE_ERROR_ON, ARM_COMPUTE_LOG_GRAPH_INFO, ARM_COMPUTE_UNUSED, and arm_compute::test::validation::input.

1563 {
1564  validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1565 
1566  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1567  ARM_COMPUTE_ERROR_ON(input == nullptr);
1568  ARM_COMPUTE_UNUSED(input);
1569 
1570  // Log info
1571  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1572  << node.name()
1573  << " Type: " << node.type()
1574  << " Target: " << TargetInfo::TargetType
1575  << " Data Type: " << input->info()->data_type()
1576  << " Input shape: " << input->info()->tensor_shape()
1577  << std::endl);
1578 
1579  return nullptr;
1580 }
TensorType
Memory type.
Definition: Types.h:38
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
Definition: Error.h:466
#define ARM_COMPUTE_LOG_GRAPH_INFO(x)
Definition: Logger.h:54
#define ARM_COMPUTE_UNUSED(...)
To avoid unused variables warnings.
Definition: Error.h:152

◆ create_priorbox_layer()

std::unique_ptr<IFunction> arm_compute::graph::backends::detail::create_priorbox_layer ( PriorBoxLayerNode node)

Create a backend priorbox layer function.

Template Parameters
PriorBoxLayerFunctionBackend priorbox function
TargetInfoTarget-specific information
Parameters
[in]nodeNode to create the backend function for
Returns
Backend priorbox layer function

Definition at line 1592 of file FunctionHelpers.h.

References ARM_COMPUTE_ERROR_ON, and ARM_COMPUTE_LOG_GRAPH_INFO.

1593 {
1594  validate_node<TargetInfo>(node, 2 /* expected inputs */, 1 /* expected outputs */);
1595 
1596  // Extract IO and info
1597  typename TargetInfo::TensorType *input0 = get_backing_tensor<TargetInfo>(node.input(0));
1598  typename TargetInfo::TensorType *input1 = get_backing_tensor<TargetInfo>(node.input(1));
1599  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1600  const PriorBoxLayerInfo prior_info = node.priorbox_info();
1601  ARM_COMPUTE_ERROR_ON(input0 == nullptr);
1602  ARM_COMPUTE_ERROR_ON(input1 == nullptr);
1603  ARM_COMPUTE_ERROR_ON(output == nullptr);
1604 
1605  // Create and configure function
1606  auto func = std::make_unique<PriorBoxLayerFunction>();
1607  func->configure(input0, input1, output, prior_info);
1608 
1609  // Log info
1610  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1611  << node.name()
1612  << " Type: " << node.type()
1613  << " Target: " << TargetInfo::TargetType
1614  << " Data Type: " << input0->info()->data_type()
1615  << " Input0 shape: " << input0->info()->tensor_shape()
1616  << " Input1 shape: " << input1->info()->tensor_shape()
1617  << " Output shape: " << output->info()->tensor_shape()
1618  << " PriorBoxLayer info: " << prior_info
1619  << std::endl);
1620 
1621  return std::move(func);
1622 }
TensorType
Memory type.
Definition: Types.h:38
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
Definition: Error.h:466
#define ARM_COMPUTE_LOG_GRAPH_INFO(x)
Definition: Logger.h:54

◆ create_quantization_layer()

std::unique_ptr<IFunction> arm_compute::graph::backends::detail::create_quantization_layer ( QuantizationLayerNode node)

Create a backend quantization layer function.

Template Parameters
QuantizationLayerFunctionBackend quantization function
TargetInfoTarget-specific information
Parameters
[in]nodeNode to create the backend function for
Returns
Backend quantization layer function

Definition at line 1634 of file FunctionHelpers.h.

References ARM_COMPUTE_ERROR_ON, ARM_COMPUTE_LOG_GRAPH_INFO, and arm_compute::test::validation::input.

1635 {
1636  validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1637 
1638  // Extract IO and info
1639  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1640  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1641  ARM_COMPUTE_ERROR_ON(input == nullptr);
1642  ARM_COMPUTE_ERROR_ON(output == nullptr);
1643 
1644  // Create and configure function
1645  auto func = std::make_unique<QuantizationLayerFunction>();
1646  func->configure(input, output);
1647 
1648  // Log info
1649  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1650  << node.name()
1651  << " Type: " << node.type()
1652  << " Target: " << TargetInfo::TargetType
1653  << " Data Type: " << input->info()->data_type()
1654  << " Input shape: " << input->info()->tensor_shape()
1655  << " Output shape: " << output->info()->tensor_shape()
1656  << std::endl);
1657 
1658  return std::move(func);
1659 }
TensorType
Memory type.
Definition: Types.h:38
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
Definition: Error.h:466
#define ARM_COMPUTE_LOG_GRAPH_INFO(x)
Definition: Logger.h:54

◆ create_reduction_operation_layer()

std::unique_ptr<IFunction> arm_compute::graph::backends::detail::create_reduction_operation_layer ( ReductionLayerNode node,
GraphContext ctx 
)

Create a backend reduction operation layer function.

Template Parameters
ReductionOperationFunctionBackend reduction operation function
TargetInfoTarget-specific information
Parameters
[in]nodeNode to create the backend function for
[in]ctxGraph context
Returns
Backend reduction operation layer function

Definition at line 1672 of file FunctionHelpers.h.

References ARM_COMPUTE_ERROR_ON, ARM_COMPUTE_LOG_GRAPH_INFO, arm_compute::graph::backends::get_memory_manager(), and arm_compute::test::validation::input.

1673 {
1674  validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1675 
1676  // Extract IO and info
1677  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1678  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1679  ReductionOperation op = node.op();
1680  int axis = node.axis();
1681  bool keep_dims = node.keep_dims();
1682  ARM_COMPUTE_ERROR_ON(input == nullptr);
1683  ARM_COMPUTE_ERROR_ON(output == nullptr);
1684 
1685  // Create and configure function
1686  auto func = std::make_unique<ReductionOperationFunction>(get_memory_manager(ctx, TargetInfo::TargetType));
1687  func->configure(input, output, axis, op, keep_dims);
1688 
1689  // Log info
1690  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1691  << node.name()
1692  << " Type: " << node.type()
1693  << " Target: " << TargetInfo::TargetType
1694  << " Data Type: " << input->info()->data_type()
1695  << " Input shape: " << input->info()->tensor_shape()
1696  << " Output shape: " << output->info()->tensor_shape()
1697  << " Operation: " << op
1698  << " Axis: " << axis
1699  << " Keep dimensions:" << keep_dims
1700  << std::endl);
1701 
1702  return std::move(func);
1703 }
ReductionOperation
Available reduction operations.
Definition: Types.h:476
TensorType
Memory type.
Definition: Types.h:38
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
Definition: Error.h:466
#define ARM_COMPUTE_LOG_GRAPH_INFO(x)
Definition: Logger.h:54
std::shared_ptr< IMemoryManager > get_memory_manager(GraphContext &ctx, Target target)
Returns the memory manager for a given target.
Definition: Utils.h:89

◆ create_reorg_layer()

std::unique_ptr<IFunction> arm_compute::graph::backends::detail::create_reorg_layer ( ReorgLayerNode node)

Create a backend reorg layer function.

Template Parameters
ReorgLayerFunctionBackend reorg function
TargetInfoTarget-specific information
Parameters
[in]nodeNode to create the backend function for
Returns
Backend reorg layer function

Definition at line 1715 of file FunctionHelpers.h.

References ARM_COMPUTE_ERROR_ON, ARM_COMPUTE_LOG_GRAPH_INFO, and arm_compute::test::validation::input.

1716 {
1717  validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1718 
1719  // Extract IO and info
1720  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1721  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1722  ARM_COMPUTE_ERROR_ON(input == nullptr);
1723  ARM_COMPUTE_ERROR_ON(output == nullptr);
1724 
1725  // Create and configure function
1726  auto func = std::make_unique<ReorgLayerFunction>();
1727  func->configure(input, output, node.stride());
1728 
1729  // Log info
1730  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1731  << node.name()
1732  << " Type: " << node.type()
1733  << " Target: " << TargetInfo::TargetType
1734  << " Data Type: " << input->info()->data_type()
1735  << " Input shape: " << input->info()->tensor_shape()
1736  << " Output shape: " << output->info()->tensor_shape()
1737  << std::endl);
1738 
1739  return std::move(func);
1740 }
TensorType
Memory type.
Definition: Types.h:38
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
Definition: Error.h:466
#define ARM_COMPUTE_LOG_GRAPH_INFO(x)
Definition: Logger.h:54

◆ create_reshape_layer()

std::unique_ptr<IFunction> arm_compute::graph::backends::detail::create_reshape_layer ( ReshapeLayerNode node)

Create a backend reshape layer function.

Template Parameters
ReshapeLayerFunctionBackend reshape function
TargetInfoTarget-specific information
Parameters
[in]nodeNode to create the backend function for
Returns
Backend reshape layer function

Definition at line 1752 of file FunctionHelpers.h.

References ARM_COMPUTE_ERROR_ON, ARM_COMPUTE_LOG_GRAPH_INFO, and arm_compute::test::validation::input.

1753 {
1754  validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1755 
1756  // Extract IO and info
1757  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1758  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1759  ARM_COMPUTE_ERROR_ON(input == nullptr);
1760  ARM_COMPUTE_ERROR_ON(output == nullptr);
1761 
1762  // Create and configure function
1763  auto func = std::make_unique<ReshapeLayerFunction>();
1764  func->configure(input, output);
1765 
1766  // Log info
1767  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1768  << node.name()
1769  << " Type: " << node.type()
1770  << " Target: " << TargetInfo::TargetType
1771  << " Data Type: " << input->info()->data_type()
1772  << " Input shape: " << input->info()->tensor_shape()
1773  << " Output shape: " << output->info()->tensor_shape()
1774  << std::endl);
1775 
1776  return std::move(func);
1777 }
TensorType
Memory type.
Definition: Types.h:38
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
Definition: Error.h:466
#define ARM_COMPUTE_LOG_GRAPH_INFO(x)
Definition: Logger.h:54

◆ create_resize_layer()

std::unique_ptr<IFunction> arm_compute::graph::backends::detail::create_resize_layer ( ResizeLayerNode node)

Create a backend resize layer function.

Template Parameters
ResizeLayerFunctionBackend resize function
TargetInfoTarget-specific information
Parameters
[in]nodeNode to create the backend function for
Returns
Backend resize layer function

Definition at line 1789 of file FunctionHelpers.h.

References ARM_COMPUTE_ERROR_ON, ARM_COMPUTE_LOG_GRAPH_INFO, arm_compute::CENTER, arm_compute::CONSTANT, and arm_compute::test::validation::input.

1790 {
1791  validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1792 
1793  // Extract IO and info
1794  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1795  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1796  ARM_COMPUTE_ERROR_ON(input == nullptr);
1797  ARM_COMPUTE_ERROR_ON(output == nullptr);
1798  const InterpolationPolicy policy = node.policy();
1799 
1800  // Create and configure function
1801  auto func = std::make_unique<ResizeLayerFunction>();
1802  func->configure(input, output, ScaleKernelInfo{ policy, BorderMode::CONSTANT, PixelValue(), SamplingPolicy::CENTER, false, false });
1803 
1804  // Log info
1805  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1806  << node.name()
1807  << " Type: " << node.type()
1808  << " Target: " << TargetInfo::TargetType
1809  << " Data Type: " << input->info()->data_type()
1810  << " Input shape: " << input->info()->tensor_shape()
1811  << " Output shape: " << output->info()->tensor_shape()
1812  << " Interpolation: " << policy
1813  << std::endl);
1814 
1815  return std::move(func);
1816 }
InterpolationPolicy
Interpolation method.
Definition: Types.h:411
TensorType
Memory type.
Definition: Types.h:38
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
Definition: Error.h:466
#define ARM_COMPUTE_LOG_GRAPH_INFO(x)
Definition: Logger.h:54

◆ create_roi_align_layer()

std::unique_ptr<IFunction> arm_compute::graph::backends::detail::create_roi_align_layer ( ROIAlignLayerNode node)

Create a backend ROI align layer function.

Template Parameters
ROIAlignLayerFunctionROI Align function
TargetInfoTarget-specific information
Parameters
[in]nodeNode to create the backend function for
Returns
ROI Align layer function

Definition at line 1828 of file FunctionHelpers.h.

References ARM_COMPUTE_ERROR_ON, ARM_COMPUTE_LOG_GRAPH_INFO, arm_compute::test::validation::input, ROIPoolingLayerInfo::pooled_height(), and ROIPoolingLayerInfo::pooled_width().

1829 {
1830  validate_node<TargetInfo>(node, 2 /* expected inputs */, 1 /* expected outputs */);
1831 
1832  // Extract IO and info
1833  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1834  typename TargetInfo::TensorType *rois = get_backing_tensor<TargetInfo>(node.input(1));
1835  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1836  ARM_COMPUTE_ERROR_ON(input == nullptr);
1837  ARM_COMPUTE_ERROR_ON(output == nullptr);
1838  ARM_COMPUTE_ERROR_ON(rois == nullptr);
1839 
1840  const ROIPoolingLayerInfo pool_info = node.pooling_info();
1841 
1842  // Create and configure function
1843  auto func = std::make_unique<ROIAlignLayerFunction>();
1844 
1845  func->configure(input, rois, output, pool_info);
1846 
1847  // Log info
1848  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1849  << node.name()
1850  << " Type: " << node.type()
1851  << " Target: " << TargetInfo::TargetType
1852  << " Data Type: " << input->info()->data_type()
1853  << " Input shape: " << input->info()->tensor_shape()
1854  << " Output shape: " << output->info()->tensor_shape()
1855  << " ROIs shape: " << rois->info()->tensor_shape()
1856  << " ROIPooling width: " << pool_info.pooled_width()
1857  << " ROIPooling height: " << pool_info.pooled_height()
1858  << std::endl);
1859 
1860  return std::move(func);
1861 }
TensorType
Memory type.
Definition: Types.h:38
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
Definition: Error.h:466
#define ARM_COMPUTE_LOG_GRAPH_INFO(x)
Definition: Logger.h:54

◆ create_slice_layer()

std::unique_ptr<IFunction> arm_compute::graph::backends::detail::create_slice_layer ( SliceLayerNode node)

Create a backend slice layer function.

Template Parameters
SliceLayerFunctionBackend slice function
TargetInfoTarget-specific information
Parameters
[in]nodeNode to create the backend function for
Returns
Backend slice layer function

Definition at line 1873 of file FunctionHelpers.h.

References ARM_COMPUTE_ERROR_ON, ARM_COMPUTE_LOG_GRAPH_INFO, and arm_compute::test::validation::input.

1874 {
1875  validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1876 
1877  // Extract IO and info
1878  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1879  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1880  ARM_COMPUTE_ERROR_ON(input == nullptr);
1881  ARM_COMPUTE_ERROR_ON(output == nullptr);
1882 
1883  // Create and configure function
1884  auto func = std::make_unique<SliceLayerFunction>();
1885  func->configure(input, output, node.starts(), node.ends());
1886 
1887  // Log info
1888  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1889  << node.name()
1890  << " Type: " << node.type()
1891  << " Target: " << TargetInfo::TargetType
1892  << " Data Type: " << input->info()->data_type()
1893  << " Input shape: " << input->info()->tensor_shape()
1894  << " Output shape: " << output->info()->tensor_shape()
1895  << std::endl);
1896 
1897  return std::move(func);
1898 }
TensorType
Memory type.
Definition: Types.h:38
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
Definition: Error.h:466
#define ARM_COMPUTE_LOG_GRAPH_INFO(x)
Definition: Logger.h:54

◆ create_softmax_layer()

std::unique_ptr<IFunction> arm_compute::graph::backends::detail::create_softmax_layer ( SoftmaxLayerNode node,
GraphContext ctx 
)

Create a backend softmax layer function.

Template Parameters
SoftmaxLayerFunctionBackend softmax function
TargetInfoTarget-specific information
Parameters
[in]nodeNode to create the backend function for
[in]ctxGraph context
Returns
Backend softmax layer function

Definition at line 1911 of file FunctionHelpers.h.

References ARM_COMPUTE_ERROR_ON, ARM_COMPUTE_LOG_GRAPH_INFO, arm_compute::graph::backends::get_memory_manager(), and arm_compute::test::validation::input.

1912 {
1913  validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1914 
1915  // Extract IO and info
1916  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1917  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1918  const float beta = node.beta();
1919  ARM_COMPUTE_ERROR_ON(input == nullptr);
1920  ARM_COMPUTE_ERROR_ON(output == nullptr);
1921 
1922  // Create and configure function
1923  auto func = std::make_unique<SoftmaxLayerFunction>(get_memory_manager(ctx, TargetInfo::TargetType));
1924  func->configure(input, output, beta);
1925 
1926  // Log info
1927  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1928  << node.name()
1929  << " Type: " << node.type()
1930  << " Target: " << TargetInfo::TargetType
1931  << " Data Type: " << input->info()->data_type()
1932  << " Input shape: " << input->info()->tensor_shape()
1933  << " Output shape: " << output->info()->tensor_shape()
1934  << std::endl);
1935 
1936  return std::move(func);
1937 }
TensorType
Memory type.
Definition: Types.h:38
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
Definition: Error.h:466
#define ARM_COMPUTE_LOG_GRAPH_INFO(x)
Definition: Logger.h:54
std::shared_ptr< IMemoryManager > get_memory_manager(GraphContext &ctx, Target target)
Returns the memory manager for a given target.
Definition: Utils.h:89

◆ create_stack_layer()

std::unique_ptr<arm_compute::IFunction> arm_compute::graph::backends::detail::create_stack_layer ( StackLayerNode node)

Create a backend layer stack function.

Template Parameters
StackLayerFunctionBackend stack function
TargetInfoTarget-specific information
Parameters
[in]nodeNode to create the backend function for
Returns
Backend stack layer function

Definition at line 1949 of file FunctionHelpers.h.

References ARM_COMPUTE_ERROR_ON, ARM_COMPUTE_LOG_GRAPH_INFO, ARM_COMPUTE_LOG_GRAPH_VERBOSE, StackLayerNode::axis(), INode::id(), INode::input(), INode::name(), INode::num_inputs(), INode::num_outputs(), INode::output(), and StackLayerNode::type().

1950 {
1951  ARM_COMPUTE_LOG_GRAPH_VERBOSE("Creating Stack node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
1952  ARM_COMPUTE_ERROR_ON(node.num_outputs() != 1);
1953 
1954  // Extract IO and info
1955  std::vector<typename TargetInfo::TensorType *> inputs;
1956  for(unsigned int i = 0; i < node.num_inputs(); ++i)
1957  {
1958  inputs.push_back(get_backing_tensor<TargetInfo>(node.input(i)));
1959  }
1960  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1961  const int axis = node.axis();
1962 
1963  // Create and configure function
1964  auto func = std::make_unique<StackLayerFunction>();
1965  func->configure(inputs, axis, output);
1966 
1967  // Log info
1968  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1969  << node.name()
1970  << " Type: " << node.type()
1971  << " Target: " << TargetInfo::TargetType
1972  << " Data Type: " << output->info()->data_type()
1973  << " Inputs shape: " << inputs[0]->info()->tensor_shape()
1974  << " Output shape: " << output->info()->tensor_shape()
1975  << " Num Inputs: " << inputs.size()
1976  << " Axis: " << axis
1977  << std::endl);
1978 
1979  return std::move(func);
1980 }
TensorType
Memory type.
Definition: Types.h:38
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
Definition: Error.h:466
#define ARM_COMPUTE_LOG_GRAPH_INFO(x)
Definition: Logger.h:54
#define ARM_COMPUTE_LOG_GRAPH_VERBOSE(x)
Definition: Logger.h:50

◆ create_strided_slice_layer()

std::unique_ptr<IFunction> arm_compute::graph::backends::detail::create_strided_slice_layer ( StridedSliceLayerNode node)

Create a backend strided slice layer function.

Template Parameters
StridedSliceLayerFunctionBackend strided slice function
TargetInfoTarget-specific information
Parameters
[in]nodeNode to create the backend function for
Returns
Backend strided slice layer function

Definition at line 1992 of file FunctionHelpers.h.

References ARM_COMPUTE_ERROR_ON, ARM_COMPUTE_LOG_GRAPH_INFO, StridedSliceLayerInfo::begin_mask(), StridedSliceLayerInfo::end_mask(), arm_compute::test::validation::info, arm_compute::test::validation::input, and StridedSliceLayerInfo::shrink_axis_mask().

1993 {
1994  validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1995 
1996  // Extract IO and info
1997  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1998  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1999  Coordinates starts = node.starts();
2000  Coordinates ends = node.ends();
2001  BiStrides strides = node.strides();
2002  StridedSliceLayerInfo info = node.strided_slice_info();
2003 
2004  ARM_COMPUTE_ERROR_ON(input == nullptr);
2005  ARM_COMPUTE_ERROR_ON(output == nullptr);
2006 
2007  // Create and configure function
2008  auto func = std::make_unique<StridedSliceLayerFunction>();
2009  func->configure(input, output, starts, ends, strides, info.begin_mask(), info.end_mask(), info.shrink_axis_mask());
2010 
2011  // Log info
2012  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
2013  << node.name()
2014  << " Type: " << node.type()
2015  << " Target: " << TargetInfo::TargetType
2016  << " Data Type: " << input->info()->data_type()
2017  << " Input shape: " << input->info()->tensor_shape()
2018  << " Output shape: " << output->info()->tensor_shape()
2019  << std::endl);
2020 
2021  return std::move(func);
2022 }
TensorType
Memory type.
Definition: Types.h:38
Coordinates BiStrides
Bidirectional strides.
Definition: Types.h:53
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
Definition: Error.h:466
#define ARM_COMPUTE_LOG_GRAPH_INFO(x)
Definition: Logger.h:54
ScaleKernelInfo info(interpolation_policy, default_border_mode, PixelValue(), sampling_policy, false)

◆ create_unary_eltwise_layer()

std::unique_ptr<IFunction> arm_compute::graph::backends::detail::create_unary_eltwise_layer ( UnaryEltwiseLayerNode node)

Create a backend unary element-wise operation layer function.

Template Parameters
UnaryEltwiseFunctionsBackend unary element-wise function
TargetInfoTarget-specific information
Parameters
[in]nodeNode to create the backend function for
Returns
Backend unary element-wise operation layer function

Definition at line 1090 of file FunctionHelpers.h.

References ARM_COMPUTE_ERROR, ARM_COMPUTE_ERROR_ON, ARM_COMPUTE_LOG_GRAPH_INFO, arm_compute::graph::Exp, and arm_compute::test::validation::input.

1091 {
1092  validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1093 
1094  // Extract IO and info
1095  typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1096  typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1097  const UnaryEltwiseOperation eltwise_op = node.eltwise_descriptor().op;
1098 
1099  ARM_COMPUTE_ERROR_ON(input == nullptr);
1100  ARM_COMPUTE_ERROR_ON(output == nullptr);
1101 
1102  std::unique_ptr<IFunction> func = nullptr;
1103  std::string func_name;
1104  if(eltwise_op == UnaryEltwiseOperation::Exp)
1105  {
1106  std::tie(func, func_name) = create_named_function<typename UnaryEltwiseFunctions::Exp>(
1107  std::string("Exp"),
1108  input, output);
1109  }
1110  else
1111  {
1112  ARM_COMPUTE_ERROR("Unsupported unary element-wise operation!");
1113  }
1114 
1115  // Log info
1116  ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1117  << node.name()
1118  << " Type: " << node.type()
1119  << " Target: " << TargetInfo::TargetType
1120  << " Operation: " << func_name
1121  << " Data Type: " << input->info()->data_type()
1122  << " Shape: " << input->info()->tensor_shape()
1123  << std::endl);
1124 
1125  return std::move(func);
1126 }
#define ARM_COMPUTE_ERROR(msg)
Print the given message then throw an std::runtime_error.
Definition: Error.h:352
TensorType
Memory type.
Definition: Types.h:38
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
Definition: Error.h:466
#define ARM_COMPUTE_LOG_GRAPH_INFO(x)
Definition: Logger.h:54
UnaryEltwiseOperation
Supported Unary Element-wise operations.
Definition: Types.h:120

◆ get_backing_tensor()

TargetInfo::TensorType* arm_compute::graph::backends::detail::get_backing_tensor ( arm_compute::graph::Tensor tensor)

Returns backing tensor of a given tensor.

Template Parameters
TargetInfoTarget information
Parameters
[in]tensorTensor to extract the backing tensor from
Returns
Backing tensor if present else nullptr

Definition at line 62 of file FunctionHelpers.h.

References ARM_COMPUTE_ERROR_ON, Tensor::desc(), Tensor::handle(), and TensorDescriptor::target.

63 {
64  typename TargetInfo::TensorType *backing_tensor = nullptr;
65  if(tensor != nullptr)
66  {
67  ARM_COMPUTE_ERROR_ON(tensor->desc().target != TargetInfo::TargetType);
68  // Get backing tensor handle
69  ITensorHandle *tensor_handle = tensor->handle();
70  // Get backing tensor
71  backing_tensor = (tensor_handle != nullptr) ? arm_compute::utils::cast::polymorphic_cast<typename TargetInfo::TensorType *>(&tensor_handle->tensor()) : nullptr;
72  }
73 
74  return backing_tensor;
75 }
ITensorHandle * handle()
Backend tensor handle accessor.
Definition: Tensor.cpp:55
TensorType
Memory type.
Definition: Types.h:38
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
Definition: Error.h:466
TensorDescriptor & desc()
TensorInfo metadata accessor.
Definition: Tensor.cpp:40

◆ get_backing_tensor_info()

arm_compute::ITensorInfo* arm_compute::graph::backends::detail::get_backing_tensor_info ( arm_compute::graph::Tensor tensor)
inline

Returns backing tensor info of a given tensor.

Parameters
[in]tensorTensor to extract the backing tensor from
Returns
Backing tensor tensor info if present else nullptr

Definition at line 50 of file ValidateHelpers.h.

References Tensor::handle(), ITensor::info(), and ITensorHandle::tensor().

Referenced by validate_arg_min_max_layer(), validate_bounding_box_transform_layer(), validate_channel_shuffle_layer(), validate_convolution_layer(), validate_depth_to_space_layer(), validate_depthwise_convolution_layer(), validate_dequantization_layer(), validate_detection_output_layer(), validate_detection_post_process_layer(), validate_eltwise_Layer(), validate_fused_convolution_with_post_op(), validate_generate_proposals_layer(), validate_l2_normalize_layer(), validate_normalize_planar_yuv_layer(), validate_pad_layer(), validate_permute_layer(), validate_prelu_layer(), validate_priorbox_layer(), validate_quantization_layer(), validate_reduction_operation_layer(), validate_reorg_layer(), validate_reshape_layer(), validate_roi_align_layer(), validate_slice_layer(), validate_strided_slice_layer(), and validate_unary_eltwise_layer().

51 {
52  return ((tensor == nullptr) || (tensor->handle() == nullptr)) ? nullptr : tensor->handle()->tensor().info();
53 }
ITensorHandle * handle()
Backend tensor handle accessor.
Definition: Tensor.cpp:55
virtual arm_compute::ITensor & tensor()=0
Backend tensor object accessor.
virtual ITensorInfo * info() const =0
Interface to be implemented by the child class to return the tensor&#39;s metadata.

◆ validate_arg_min_max_layer()

Status arm_compute::graph::backends::detail::validate_arg_min_max_layer ( ArgMinMaxLayerNode node)

Validates a ArgMinMax layer node.

Template Parameters
ArgMinMaxlayer function type
Parameters
[in]nodeNode to validate
Returns
Status

Definition at line 64 of file ValidateHelpers.h.

References ARM_COMPUTE_LOG_GRAPH_VERBOSE, ARM_COMPUTE_RETURN_ERROR_ON, ArgMinMaxLayerNode::axis(), get_backing_tensor_info(), INode::id(), INode::input(), arm_compute::test::validation::input, INode::name(), INode::num_inputs(), INode::num_outputs(), INode::output(), ArgMinMaxLayerNode::reduction_operation(), and arm_compute::experimental::dynamic_fusion::validate().

65 {
66  ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating ArgMinMaxLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
67  ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 1);
68  ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);
69 
70  // Extract IO and info
72  arm_compute::ITensorInfo *output = get_backing_tensor_info(node.output(0));
73 
74  // Validate function
75  return ArgMinMaxLayer::validate(input, node.axis(), output, node.reduction_operation());
76 }
Status validate(const OperatorGraph &op_graph)
Return the validity of op_graph, usually after performing an operation (e.g.
arm_compute::ITensorInfo * get_backing_tensor_info(arm_compute::graph::Tensor *tensor)
Returns backing tensor info of a given tensor.
Store the tensor&#39;s metadata.
Definition: ITensorInfo.h:40
#define ARM_COMPUTE_RETURN_ERROR_ON(cond)
If the condition is true, an error is returned.
Definition: Error.h:296
#define ARM_COMPUTE_LOG_GRAPH_VERBOSE(x)
Definition: Logger.h:50

◆ validate_bounding_box_transform_layer()

Status arm_compute::graph::backends::detail::validate_bounding_box_transform_layer ( BoundingBoxTransformLayerNode node)

Validates a Bounding Box Transform layer node.

Template Parameters
BoundingBoxTransformLayerBounding Box Transform layer function type
Parameters
[in]nodeNode to validate
Returns
Status

Definition at line 87 of file ValidateHelpers.h.

References ARM_COMPUTE_LOG_GRAPH_VERBOSE, ARM_COMPUTE_RETURN_ERROR_ON, get_backing_tensor_info(), INode::id(), BoundingBoxTransformLayerNode::info(), INode::input(), arm_compute::test::validation::input, INode::name(), INode::num_inputs(), INode::num_outputs(), INode::output(), and arm_compute::experimental::dynamic_fusion::validate().

88 {
89  ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating BoundingBoxTransformLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
90  ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 2);
91  ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);
92 
93  // Extract IO and info
95  arm_compute::ITensorInfo *deltas = get_backing_tensor_info(node.input(1));
96  arm_compute::ITensorInfo *output = get_backing_tensor_info(node.output(0));
97  const BoundingBoxTransformInfo bbox_info = node.info();
98 
99  return BoundingBoxTransformLayer::validate(input, output, deltas, bbox_info);
100 }
Status validate(const OperatorGraph &op_graph)
Return the validity of op_graph, usually after performing an operation (e.g.
arm_compute::ITensorInfo * get_backing_tensor_info(arm_compute::graph::Tensor *tensor)
Returns backing tensor info of a given tensor.
Store the tensor's metadata.
Definition: ITensorInfo.h:40
#define ARM_COMPUTE_RETURN_ERROR_ON(cond)
If the condition is true, an error is returned.
Definition: Error.h:296
#define ARM_COMPUTE_LOG_GRAPH_VERBOSE(x)
Definition: Logger.h:50

◆ validate_channel_shuffle_layer()

Status arm_compute::graph::backends::detail::validate_channel_shuffle_layer ( ChannelShuffleLayerNode node)

Validates a Channel Shuffle layer node.

Template Parameters
ChannelShuffleLayerChannel Shuffle layer function type
Parameters
[in]nodeNode to validate
Returns
Status

Definition at line 111 of file ValidateHelpers.h.

References ARM_COMPUTE_LOG_GRAPH_VERBOSE, ARM_COMPUTE_RETURN_ERROR_ON, get_backing_tensor_info(), INode::id(), INode::input(), arm_compute::test::validation::input, INode::name(), ChannelShuffleLayerNode::num_groups(), arm_compute::test::validation::num_groups, INode::num_inputs(), INode::num_outputs(), INode::output(), and arm_compute::experimental::dynamic_fusion::validate().

112 {
113  ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating ChannelShuffle node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
114  ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 1);
115  ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);
116 
117  // Extract IO and info
119  arm_compute::ITensorInfo *output = get_backing_tensor_info(node.output(0));
120  const unsigned int num_groups = node.num_groups();
121 
122  return ChannelShuffleLayer::validate(input, output, num_groups);
123 }
Status validate(const OperatorGraph &op_graph)
Return the validity of op_graph, usually after performing an operation (e.g.
arm_compute::ITensorInfo * get_backing_tensor_info(arm_compute::graph::Tensor *tensor)
Returns backing tensor info of a given tensor.
Store the tensor's metadata.
Definition: ITensorInfo.h:40
#define ARM_COMPUTE_RETURN_ERROR_ON(cond)
If the condition is true, an error is returned.
Definition: Error.h:296
const unsigned int num_groups
Definition: Im2Col.cpp:153
#define ARM_COMPUTE_LOG_GRAPH_VERBOSE(x)
Definition: Logger.h:50

◆ validate_convolution_layer()

Status arm_compute::graph::backends::detail::validate_convolution_layer ( ConvolutionLayerNode node)

Validates a Convolution layer node.

Template Parameters
ConvolutionLayerDefault Convolution layer function type
DirectConvolutionLayerDirect Convolution layer function type
GEMMConvolutionLayerGEMM Convolution layer function type
WinogradConvolutionLayerWinograd Convolution layer function type
Parameters
[in]nodeNode to validate
Returns
Status

Definition at line 137 of file ValidateHelpers.h.

References ARM_COMPUTE_LOG_GRAPH_VERBOSE, ARM_COMPUTE_RETURN_ERROR_MSG, ARM_COMPUTE_RETURN_ERROR_ON, ARM_COMPUTE_RETURN_ERROR_ON_MSG, arm_compute::test::validation::conv_info, ConvolutionLayerNode::convolution_info(), ConvolutionLayerNode::convolution_method(), arm_compute::graph::Default, arm_compute::graph::Direct, arm_compute::graph::Enabled, ConvolutionLayerNode::fast_math_hint(), arm_compute::graph::GEMM, get_backing_tensor_info(), INode::id(), INode::input(), arm_compute::test::validation::input, arm_compute::is_data_type_quantized_asymmetric(), INode::name(), ConvolutionLayerNode::num_groups(), arm_compute::test::validation::num_groups, INode::num_inputs(), INode::num_outputs(), INode::output(), arm_compute::S32, ITensorInfo::set_data_type(), arm_compute::experimental::dynamic_fusion::validate(), and arm_compute::graph::Winograd.

Referenced by CLNodeValidator::validate(), and NENodeValidator::validate().

138 {
139  ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating ConvolutionLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
140  ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 3);
141  ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);
142 
143  // Extract IO and info
145  arm_compute::ITensorInfo *weights = get_backing_tensor_info(node.input(1));
146  arm_compute::ITensorInfo *biases = get_backing_tensor_info(node.input(2));
147  arm_compute::ITensorInfo *output = get_backing_tensor_info(node.output(0));
148 
149  if(is_data_type_quantized_asymmetric(input->data_type()))
150  {
151  biases->set_data_type(DataType::S32);
152  }
153 
154  const PadStrideInfo conv_info = node.convolution_info();
155  const ConvolutionMethod conv_algorithm = node.convolution_method();
156  const bool fast_math = node.fast_math_hint() == FastMathHint::Enabled;
157  const unsigned int num_groups = node.num_groups();
158 
159  // Validate function
160  Status status{};
161  switch(conv_algorithm)
162  {
163  case ConvolutionMethod::Direct:
164  ARM_COMPUTE_RETURN_ERROR_ON_MSG(num_groups != 1, "DirectConvolutionLayer does not support grouping!");
165  status = DirectConvolutionLayer::validate(input, weights, biases, output, conv_info);
166  break;
168  status = GEMMConvolutionLayer::validate(input, weights, biases, output, conv_info,
169  WeightsInfo(), Size2D(1, 1), ActivationLayerInfo(), num_groups);
170  break;
171  case ConvolutionMethod::Winograd:
172  ARM_COMPUTE_RETURN_ERROR_ON_MSG(num_groups != 1, "WinogradConvolutionLayer does not support grouping!");
173  status = WinogradConvolutionLayer::validate(input, weights, biases, output, conv_info, ActivationLayerInfo(), fast_math);
174  break;
175  case ConvolutionMethod::Default:
176  status = ConvolutionLayer::validate(input, weights, biases, output, conv_info,
177  WeightsInfo(), Size2D(1, 1), ActivationLayerInfo(), fast_math, num_groups);
178  break;
179  default:
180  ARM_COMPUTE_RETURN_ERROR_MSG("Unsupported convolution method");
181  }
182 
183  return status;
184 }
Status validate(const OperatorGraph &op_graph)
Return the validity of op_graph, usually after performing an operation (e.g.
arm_compute::ITensorInfo * get_backing_tensor_info(arm_compute::graph::Tensor *tensor)
Returns backing tensor info of a given tensor.
Store the tensor's metadata.
Definition: ITensorInfo.h:40
ConvolutionMethod
Available ConvolutionMethod.
Definition: Types.h:134
#define ARM_COMPUTE_RETURN_ERROR_ON(cond)
If the condition is true, an error is returned.
Definition: Error.h:296
const unsigned int num_groups
Definition: Im2Col.cpp:153
virtual ITensorInfo & set_data_type(DataType data_type)=0
Set the data type to the specified value.
bool is_data_type_quantized_asymmetric(DataType dt)
Check if a given data type is of asymmetric quantized type.
Definition: Utils.h:1052
GEMM CL kernel type.
Definition: CLTypes.h:86
#define ARM_COMPUTE_RETURN_ERROR_MSG(...)
An error is returned with the given description.
Definition: Error.h:194
#define ARM_COMPUTE_LOG_GRAPH_VERBOSE(x)
Definition: Logger.h:50
#define ARM_COMPUTE_RETURN_ERROR_ON_MSG(cond, msg)
If the condition is true, an error is returned.
Definition: Error.h:244

◆ validate_depth_to_space_layer()

Status arm_compute::graph::backends::detail::validate_depth_to_space_layer ( DepthToSpaceLayerNode node)

Validates a depth to space layer node.

Template Parameters
DepthToSpaceLayerDepth to Space layer type
Parameters
[in]nodeNode to validate
Returns
Status

Definition at line 270 of file ValidateHelpers.h.

References ARM_COMPUTE_LOG_GRAPH_VERBOSE, ARM_COMPUTE_RETURN_ERROR_ON, DepthToSpaceLayerNode::block_shape(), get_backing_tensor_info(), INode::id(), INode::input(), arm_compute::test::validation::input, INode::name(), INode::num_inputs(), INode::num_outputs(), INode::output(), and arm_compute::experimental::dynamic_fusion::validate().

271 {
272  ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating DetectionOutputLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
273  ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 1);
274  ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);
275 
276  // Extract IO and info
278  arm_compute::ITensorInfo *output = get_backing_tensor_info(node.output(0));
279 
280  return DepthToSpaceLayer::validate(input, output, node.block_shape());
281 }
Status validate(const OperatorGraph &op_graph)
Return the validity of op_graph, usually after performing an operation (e.g.
arm_compute::ITensorInfo * get_backing_tensor_info(arm_compute::graph::Tensor *tensor)
Returns backing tensor info of a given tensor.
Store the tensor's metadata.
Definition: ITensorInfo.h:40
#define ARM_COMPUTE_RETURN_ERROR_ON(cond)
If the condition is true, an error is returned.
Definition: Error.h:296
#define ARM_COMPUTE_LOG_GRAPH_VERBOSE(x)
Definition: Logger.h:50

◆ validate_depthwise_convolution_layer()

Status arm_compute::graph::backends::detail::validate_depthwise_convolution_layer ( DepthwiseConvolutionLayerNode node)

Validates a Depthwise Convolution layer node.

Template Parameters
DepthwiseConvolutionLayerDefault Depthwise Convolution layer type
Parameters
[in]nodeNode to validate
Returns
Status

Definition at line 231 of file ValidateHelpers.h.

References ARM_COMPUTE_LOG_GRAPH_VERBOSE, ARM_COMPUTE_RETURN_ERROR_MSG, ARM_COMPUTE_RETURN_ERROR_ON, arm_compute::test::validation::conv_info, DepthwiseConvolutionLayerNode::convolution_info(), arm_compute::graph::Default, DepthwiseConvolutionLayerNode::depth_multiplier(), DepthwiseConvolutionLayerNode::depthwise_convolution_method(), get_backing_tensor_info(), INode::id(), INode::input(), arm_compute::test::validation::input, INode::name(), INode::num_inputs(), INode::num_outputs(), arm_compute::graph::Optimized3x3, INode::output(), and arm_compute::experimental::dynamic_fusion::validate().

232 {
233  ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating DepthwiseConvolutionLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
234  ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 3);
235  ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);
236 
237  // Extract IO and info
240  arm_compute::ITensorInfo *biases = get_backing_tensor_info(node.input(2));
241  arm_compute::ITensorInfo *output = get_backing_tensor_info(node.output(0));
242 
243  const PadStrideInfo conv_info = node.convolution_info();
244  const DepthwiseConvolutionMethod dwc_algorithm = node.depthwise_convolution_method();
245  const int depth_multiplier = node.depth_multiplier();
246 
247  // Validate function
248  Status status{};
249  switch(dwc_algorithm)
250  {
251  case DepthwiseConvolutionMethod::Default:
252  case DepthwiseConvolutionMethod::Optimized3x3:
253  status = DepthwiseConvolutionLayer::validate(input, weights, biases, output, conv_info, depth_multiplier);
254  break;
255  default:
256  ARM_COMPUTE_RETURN_ERROR_MSG("Unsupported depthwise convolution method");
257  }
258 
259  return status;
260 }
Status validate(const OperatorGraph &op_graph)
Return the validity of op_graph, usually after performing an operation (e.g.
arm_compute::ITensorInfo * get_backing_tensor_info(arm_compute::graph::Tensor *tensor)
Returns backing tensor info of a given tensor.
Store the tensor's metadata.
Definition: ITensorInfo.h:40
#define ARM_COMPUTE_RETURN_ERROR_ON(cond)
If the condition is true, an error is returned.
Definition: Error.h:296
DepthwiseConvolutionMethod
Supported Depthwise Convolution layer methods.
Definition: Types.h:135
#define ARM_COMPUTE_RETURN_ERROR_MSG(...)
An error is returned with the given description.
Definition: Error.h:194
#define ARM_COMPUTE_LOG_GRAPH_VERBOSE(x)
Definition: Logger.h:50

◆ validate_dequantization_layer()

Status arm_compute::graph::backends::detail::validate_dequantization_layer ( DequantizationLayerNode node)

Validates a dequantize layer node.

Template Parameters
DequantizationLayerDequantize layer type
Parameters
[in]nodeNode to validate
Returns
Status

Definition at line 291 of file ValidateHelpers.h.

References ARM_COMPUTE_LOG_GRAPH_VERBOSE, ARM_COMPUTE_RETURN_ERROR_ON, get_backing_tensor_info(), INode::id(), INode::input(), arm_compute::test::validation::input, INode::name(), INode::num_inputs(), INode::num_outputs(), INode::output(), and arm_compute::experimental::dynamic_fusion::validate().

292 {
293  ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating DetectionOutputLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
294  ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 1);
295  ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);
296 
297  // Extract IO and info
299  arm_compute::ITensorInfo *output = get_backing_tensor_info(node.output(0));
300 
301  return DequantizationLayer::validate(input, output);
302 }
Status validate(const OperatorGraph &op_graph)
Return the validity of op_graph, usually after performing an operation (e.g.
arm_compute::ITensorInfo * get_backing_tensor_info(arm_compute::graph::Tensor *tensor)
Returns backing tensor info of a given tensor.
Store the tensor's metadata.
Definition: ITensorInfo.h:40
#define ARM_COMPUTE_RETURN_ERROR_ON(cond)
If the condition is true, an error is returned.
Definition: Error.h:296
#define ARM_COMPUTE_LOG_GRAPH_VERBOSE(x)
Definition: Logger.h:50

◆ validate_detection_output_layer()

Status arm_compute::graph::backends::detail::validate_detection_output_layer ( DetectionOutputLayerNode node)

Validates a detection output layer node.

Template Parameters
DetectionOutputLayerDetectionOutput layer type
Parameters
[in]nodeNode to validate
Returns
Status

Definition at line 312 of file ValidateHelpers.h.

References ARM_COMPUTE_LOG_GRAPH_VERBOSE, ARM_COMPUTE_RETURN_ERROR_ON, DetectionOutputLayerNode::detection_output_info(), get_backing_tensor_info(), INode::id(), INode::input(), INode::name(), INode::num_inputs(), INode::num_outputs(), INode::output(), and arm_compute::experimental::dynamic_fusion::validate().

313 {
314  ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating DetectionOutputLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
315  ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 3);
316  ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);
317 
318  // Extract IO and info
319  arm_compute::ITensorInfo *input0 = get_backing_tensor_info(node.input(0));
320  arm_compute::ITensorInfo *input1 = get_backing_tensor_info(node.input(1));
321  arm_compute::ITensorInfo *input2 = get_backing_tensor_info(node.input(2));
322  arm_compute::ITensorInfo *output = get_backing_tensor_info(node.output(0));
323  const DetectionOutputLayerInfo detect_info = node.detection_output_info();
324 
325  return DetectionOutputLayer::validate(input0, input1, input2, output, detect_info);
326 }
Status validate(const OperatorGraph &op_graph)
Return the validity of op_graph, usually after performing an operation (e.g.
arm_compute::ITensorInfo * get_backing_tensor_info(arm_compute::graph::Tensor *tensor)
Returns backing tensor info of a given tensor.
Store the tensor's metadata.
Definition: ITensorInfo.h:40
#define ARM_COMPUTE_RETURN_ERROR_ON(cond)
If the condition is true, an error is returned.
Definition: Error.h:296
#define ARM_COMPUTE_LOG_GRAPH_VERBOSE(x)
Definition: Logger.h:50

◆ validate_detection_post_process_layer()

Status arm_compute::graph::backends::detail::validate_detection_post_process_layer ( DetectionPostProcessLayerNode node)

Validates a detection post process layer node.

Template Parameters
DetectionPostProcessLayerDetectionOutput layer type
Parameters
[in]nodeNode to validate
Returns
Status

Definition at line 336 of file ValidateHelpers.h.

References ARM_COMPUTE_LOG_GRAPH_VERBOSE, ARM_COMPUTE_RETURN_ERROR_ON, DetectionPostProcessLayerNode::detection_post_process_info(), get_backing_tensor_info(), INode::id(), INode::input(), INode::name(), INode::num_inputs(), INode::num_outputs(), INode::output(), and arm_compute::experimental::dynamic_fusion::validate().

337 {
338  ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating DetectionPostProcessLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
339  ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 3);
340  ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 4);
341 
342  // Extract IO and info
343  arm_compute::ITensorInfo *input0 = get_backing_tensor_info(node.input(0));
344  arm_compute::ITensorInfo *input1 = get_backing_tensor_info(node.input(1));
345  arm_compute::ITensorInfo *input2 = get_backing_tensor_info(node.input(2));
346  arm_compute::ITensorInfo *output0 = get_backing_tensor_info(node.output(0));
347  arm_compute::ITensorInfo *output1 = get_backing_tensor_info(node.output(1));
348  arm_compute::ITensorInfo *output2 = get_backing_tensor_info(node.output(2));
349  arm_compute::ITensorInfo *output3 = get_backing_tensor_info(node.output(3));
350  const DetectionPostProcessLayerInfo detect_info = node.detection_post_process_info();
351 
352  return DetectionPostProcessLayer::validate(input0, input1, input2, output0, output1, output2, output3, detect_info);
353 }
Status validate(const OperatorGraph &op_graph)
Return the validity of op_graph, usually after performing an operation (e.g.
arm_compute::ITensorInfo * get_backing_tensor_info(arm_compute::graph::Tensor *tensor)
Returns backing tensor info of a given tensor.
Store the tensor's metadata.
Definition: ITensorInfo.h:40
#define ARM_COMPUTE_RETURN_ERROR_ON(cond)
If the condition is true, an error is returned.
Definition: Error.h:296
#define ARM_COMPUTE_LOG_GRAPH_VERBOSE(x)
Definition: Logger.h:50

◆ validate_eltwise_Layer()

Status arm_compute::graph::backends::detail::validate_eltwise_Layer ( EltwiseLayerNode node)

Validates an element-wise layer node.

Parameters
[in]nodeNode to validate
Returns
Status

Definition at line 700 of file ValidateHelpers.h.

References arm_compute::graph::Add, ARM_COMPUTE_ERROR, ARM_COMPUTE_LOG_GRAPH_VERBOSE, ARM_COMPUTE_RETURN_ERROR_ON, EltwiseLayerNode::convert_policy(), arm_compute::graph::Div, EltwiseLayerNode::eltwise_operation(), EltwiseLayerNode::fused_activation(), get_backing_tensor_info(), INode::id(), INode::input(), arm_compute::graph::Max, arm_compute::graph::Mul, INode::name(), INode::num_inputs(), INode::num_outputs(), INode::output(), EltwiseLayerNode::output_quant_info(), EltwiseLayerNode::rounding_policy(), arm_compute::graph::Sub, and arm_compute::experimental::dynamic_fusion::validate().

701 {
702  ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating EltwiseLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
703  ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 2);
704  ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);
705 
706  // Extract input and output
707  const arm_compute::ITensorInfo *input1 = detail::get_backing_tensor_info(node.input(0));
708  const arm_compute::ITensorInfo *input2 = detail::get_backing_tensor_info(node.input(1));
709  const arm_compute::ITensorInfo *output = get_backing_tensor_info(node.output(0));
710  const EltwiseOperation eltwise_op = node.eltwise_operation();
711  const ConvertPolicy convert_policy = node.convert_policy();
712  const RoundingPolicy round_policy = node.rounding_policy();
713  const ActivationLayerInfo act_info = node.fused_activation();
714  const QuantizationInfo quant_info = node.output_quant_info();
715 
716  // Validate function
717  if(eltwise_op == EltwiseOperation::Add)
718  {
719  return EltwiseLayerFunctions::ArithmeticAddition::validate(input1, input2, output, convert_policy, act_info);
720  }
721  else if(eltwise_op == EltwiseOperation::Sub)
722  {
723  return EltwiseLayerFunctions::ArithmeticSubtraction::validate(input1, input2, output, convert_policy, act_info);
724  }
725  else if(eltwise_op == EltwiseOperation::Mul)
726  {
727  return EltwiseLayerFunctions::PixelWiseMultiplication::validate(input1, input2, output, 1.0f, convert_policy, round_policy, act_info);
728  }
729  else if(eltwise_op == EltwiseOperation::Max)
730  {
731  return EltwiseLayerFunctions::ElementwiseMax::validate(input1, input2, output, act_info);
732  }
733  else if(eltwise_op == EltwiseOperation::Div)
734  {
735  return EltwiseLayerFunctions::ArithmeticDivision::validate(input1, input2, output, act_info);
736  }
737  else
738  {
739  ARM_COMPUTE_ERROR("Unsupported element-wise operation!");
740  }
741  return Status{};
742 }
Status validate(const OperatorGraph &op_graph)
Return the validity of op_graph, usually after performing an operation (e.g.
EltwiseOperation
Supported Element-wise operations.
Definition: Types.h:109
arm_compute::ITensorInfo * get_backing_tensor_info(arm_compute::graph::Tensor *tensor)
Returns backing tensor info of a given tensor.
#define ARM_COMPUTE_ERROR(msg)
Print the given message then throw an std::runtime_error.
Definition: Error.h:352
Store the tensor's metadata.
Definition: ITensorInfo.h:40
#define ARM_COMPUTE_RETURN_ERROR_ON(cond)
If the condition is true, an error is returned.
Definition: Error.h:296
RoundingPolicy
Rounding method.
Definition: Rounding.h:30
#define ARM_COMPUTE_LOG_GRAPH_VERBOSE(x)
Definition: Logger.h:50
ConvertPolicy
Policy to handle integer overflow.
Definition: Types.h:404

◆ validate_fused_convolution_with_post_op()

Status arm_compute::graph::backends::detail::validate_fused_convolution_with_post_op ( FusedConvolutionWithPostOpNode node)

Validates a fused Convolution layer node with post operations.

Template Parameters
GEMMConvolutionLayerGEMM Convolution layer function type
Parameters
[in]nodeNode to validate
Returns
Status

Definition at line 195 of file ValidateHelpers.h.

References ARM_COMPUTE_LOG_GRAPH_VERBOSE, ARM_COMPUTE_RETURN_ERROR_ON, arm_compute::test::validation::conv_info, FusedConvolutionWithPostOpNode::convolution_info(), get_backing_tensor_info(), INode::id(), INode::input(), arm_compute::test::validation::input, arm_compute::is_data_type_quantized_asymmetric(), INode::name(), FusedConvolutionWithPostOpNode::num_groups(), arm_compute::test::validation::num_groups, INode::num_inputs(), INode::num_outputs(), INode::output(), arm_compute::S32, ITensorInfo::set_data_type(), and arm_compute::experimental::dynamic_fusion::validate().

196 {
197  ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating fused ConvolutionLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
198  ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 4);
199  ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);
200 
201  // Extract IO and info
203  arm_compute::ITensorInfo *weights = get_backing_tensor_info(node.input(1));
204  arm_compute::ITensorInfo *biases = get_backing_tensor_info(node.input(2));
205  arm_compute::ITensorInfo *output = get_backing_tensor_info(node.output(0));
206 
207  if(is_data_type_quantized_asymmetric(input->data_type()))
208  {
209  biases->set_data_type(DataType::S32);
210  }
211 
212  const PadStrideInfo conv_info = node.convolution_info();
213  //const ConvolutionMethod conv_algorithm = node.convolution_method();
214  //const bool fast_math = node.fast_math_hint() == FastMathHint::Enabled;
215  const unsigned int num_groups = node.num_groups();
216 
217  // Validate function
218  return GEMMConvolutionLayer::validate(input, weights, biases, output, conv_info,
219  WeightsInfo(), Size2D(1, 1), ActivationLayerInfo(), num_groups);
220 }
Status validate(const OperatorGraph &op_graph)
Return the validity of op_graph, usually after performing an operation (e.g.
arm_compute::ITensorInfo * get_backing_tensor_info(arm_compute::graph::Tensor *tensor)
Returns backing tensor info of a given tensor.
Store the tensor's metadata.
Definition: ITensorInfo.h:40
#define ARM_COMPUTE_RETURN_ERROR_ON(cond)
If the condition is true, an error is returned.
Definition: Error.h:296
const unsigned int num_groups
Definition: Im2Col.cpp:153
virtual ITensorInfo & set_data_type(DataType data_type)=0
Set the data type to the specified value.
bool is_data_type_quantized_asymmetric(DataType dt)
Check if a given data type is of asymmetric quantized type.
Definition: Utils.h:1052
#define ARM_COMPUTE_LOG_GRAPH_VERBOSE(x)
Definition: Logger.h:50

◆ validate_generate_proposals_layer()

Status arm_compute::graph::backends::detail::validate_generate_proposals_layer ( GenerateProposalsLayerNode node)

Validates a Generate Proposals layer node.

Template Parameters
GenerateProposalsLayerGenerate Proposals layer type
Parameters
[in]nodeNode to validate
Returns
Status

Definition at line 364 of file ValidateHelpers.h.

References ARM_COMPUTE_LOG_GRAPH_VERBOSE, ARM_COMPUTE_RETURN_ERROR_ON, get_backing_tensor_info(), INode::id(), GenerateProposalsLayerNode::info(), arm_compute::test::validation::info, INode::input(), INode::name(), INode::num_inputs(), INode::num_outputs(), INode::output(), and arm_compute::experimental::dynamic_fusion::validate().

365 {
366  ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating GenerateProposalsLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
367  ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 3);
368  ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 3);
369 
370  // Extract IO and info
374  arm_compute::ITensorInfo *proposals = get_backing_tensor_info(node.output(0));
375  arm_compute::ITensorInfo *scores_out = get_backing_tensor_info(node.output(1));
376  arm_compute::ITensorInfo *num_valid_proposals = get_backing_tensor_info(node.output(2));
377  const GenerateProposalsInfo info = node.info();
378 
379  return GenerateProposalsLayer::validate(scores, deltas, anchors, proposals, scores_out, num_valid_proposals, info);
380 }
Status validate(const OperatorGraph &op_graph)
Return the validity of op_graph, usually after performing an operation (e.g.
arm_compute::ITensorInfo * get_backing_tensor_info(arm_compute::graph::Tensor *tensor)
Returns backing tensor info of a given tensor.
Store the tensor's metadata.
Definition: ITensorInfo.h:40
#define ARM_COMPUTE_RETURN_ERROR_ON(cond)
If the condition is true, an error is returned.
Definition: Error.h:296
ScaleKernelInfo info(interpolation_policy, default_border_mode, PixelValue(), sampling_policy, false)
#define ARM_COMPUTE_LOG_GRAPH_VERBOSE(x)
Definition: Logger.h:50

◆ validate_l2_normalize_layer()

Status arm_compute::graph::backends::detail::validate_l2_normalize_layer ( L2NormalizeLayerNode node)

Validates an L2Normalization layer node.

Template Parameters
L2Normalizationlayer type
Parameters
[in]nodeNode to validate
Returns
Status

Definition at line 391 of file ValidateHelpers.h.

References ARM_COMPUTE_LOG_GRAPH_VERBOSE, ARM_COMPUTE_RETURN_ERROR_ON, L2NormalizeLayerNode::axis(), arm_compute::quantization::epsilon, L2NormalizeLayerNode::epsilon(), get_backing_tensor_info(), INode::id(), INode::input(), arm_compute::test::validation::input, INode::name(), INode::num_inputs(), INode::num_outputs(), INode::output(), and arm_compute::experimental::dynamic_fusion::validate().

392 {
393  ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating L2NormalizeLayerNode node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
394  ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 1);
395  ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);
396 
397  // Extract IO and info
399  arm_compute::ITensorInfo *output = get_backing_tensor_info(node.output(0));
400  int axis = node.axis();
401  float epsilon = node.epsilon();
402 
403  // Validate function
404  return L2NormalizeLayer::validate(input, output, axis, epsilon);
405 }
Status validate(const OperatorGraph &op_graph)
Return the validity of op_graph, usually after performing an operation (e.g.
arm_compute::ITensorInfo * get_backing_tensor_info(arm_compute::graph::Tensor *tensor)
Returns backing tensor info of a given tensor.
Store the tensor's metadata.
Definition: ITensorInfo.h:40
#define ARM_COMPUTE_RETURN_ERROR_ON(cond)
If the condition is true, an error is returned.
Definition: Error.h:296
#define ARM_COMPUTE_LOG_GRAPH_VERBOSE(x)
Definition: Logger.h:50

◆ validate_node()

void arm_compute::graph::backends::detail::validate_node ( const INode node,
size_t  num_expected_inputs,
size_t  num_expected_outputs 
)

Definition at line 78 of file FunctionHelpers.h.

References ARM_COMPUTE_ERROR_ON, ARM_COMPUTE_LOG_GRAPH_VERBOSE, ARM_COMPUTE_UNUSED, INode::assigned_target(), INode::id(), INode::name(), INode::num_inputs(), INode::num_outputs(), and INode::type().

79 {
80  ARM_COMPUTE_LOG_GRAPH_VERBOSE("Creating " << node.type()
81  << " Target: " << TargetInfo::TargetType
82  << " ID: " << node.id()
83  << node.name()
84  << std::endl);
85 
86  ARM_COMPUTE_ERROR_ON(TargetInfo::TargetType != node.assigned_target());
87  ARM_COMPUTE_ERROR_ON(node.num_inputs() != num_expected_inputs);
88  ARM_COMPUTE_ERROR_ON(node.num_outputs() != num_expected_outputs);
89  ARM_COMPUTE_UNUSED(node, num_expected_inputs, num_expected_outputs);
90 }
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
Definition: Error.h:466
#define ARM_COMPUTE_UNUSED(...)
To avoid unused variables warnings.
Definition: Error.h:152
#define ARM_COMPUTE_LOG_GRAPH_VERBOSE(x)
Definition: Logger.h:50

◆ validate_normalize_planar_yuv_layer()

Status arm_compute::graph::backends::detail::validate_normalize_planar_yuv_layer ( NormalizePlanarYUVLayerNode node)

Validates a NormalizePlanarYUV layer node.

Template Parameters
NormalizePlanarYUVLayerlayer type
Parameters
[in]nodeNode to validate
Returns
Status

Definition at line 416 of file ValidateHelpers.h.

References ARM_COMPUTE_LOG_GRAPH_VERBOSE, ARM_COMPUTE_RETURN_ERROR_ON, get_backing_tensor_info(), INode::id(), INode::input(), arm_compute::test::validation::input, INode::name(), INode::num_inputs(), INode::num_outputs(), INode::output(), and arm_compute::experimental::dynamic_fusion::validate().

417 {
418  ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating NormalizePlanarYUVLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
419  ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 3);
420  ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);
421 
422  // Extract IO and info
426  arm_compute::ITensorInfo *output = get_backing_tensor_info(node.output(0));
427 
428  // Validate function
429  return NormalizePlanarYUVLayer::validate(input, output, mean, std);
430 }
Status validate(const OperatorGraph &op_graph)
Return the validity of op_graph, usually after performing an operation (e.g.
arm_compute::ITensorInfo * get_backing_tensor_info(arm_compute::graph::Tensor *tensor)
Returns backing tensor info of a given tensor.
Store the tensor's metadata.
Definition: ITensorInfo.h:40
#define ARM_COMPUTE_RETURN_ERROR_ON(cond)
If the condition is true, an error is returned.
Definition: Error.h:296
#define ARM_COMPUTE_LOG_GRAPH_VERBOSE(x)
Definition: Logger.h:50

◆ validate_pad_layer()

Status arm_compute::graph::backends::detail::validate_pad_layer ( PadLayerNode node)

Validates a pad layer node.

Template Parameters
PadLayer  Pad layer type
Parameters
[in]  node  Node to validate
Returns
Status

Definition at line 441 of file ValidateHelpers.h.

References ARM_COMPUTE_LOG_GRAPH_VERBOSE, ARM_COMPUTE_RETURN_ERROR_ON, get_backing_tensor_info(), INode::id(), INode::input(), arm_compute::test::validation::input, INode::name(), INode::num_inputs(), INode::num_outputs(), INode::output(), PadLayerNode::padding(), and arm_compute::experimental::dynamic_fusion::validate().

442 {
443  ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating PadLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
444  ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 1);
445  ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);
446 
447  // Extract IO and info
449  arm_compute::ITensorInfo *output = get_backing_tensor_info(node.output(0));
450  const PaddingList &padding = node.padding();
451 
452  return PadLayer::validate(input, output, padding);
453 }
Status validate(const OperatorGraph &op_graph)
Return the validity of op_graph, usually after performing an operation (e.g.
std::vector< PaddingInfo > PaddingList
List of padding information.
Definition: Types.h:453
arm_compute::ITensorInfo * get_backing_tensor_info(arm_compute::graph::Tensor *tensor)
Returns backing tensor info of a given tensor.
Store the tensor's metadata.
Definition: ITensorInfo.h:40
#define ARM_COMPUTE_RETURN_ERROR_ON(cond)
If the condition is true, an error is returned.
Definition: Error.h:296
#define ARM_COMPUTE_LOG_GRAPH_VERBOSE(x)
Definition: Logger.h:50

◆ validate_permute_layer()

Status arm_compute::graph::backends::detail::validate_permute_layer ( PermuteLayerNode node)

Validates a permute layer node.

Template Parameters
PermuteLayer  Permute layer type
Parameters
[in]  node  Node to validate
Returns
Status

Definition at line 464 of file ValidateHelpers.h.

References ARM_COMPUTE_LOG_GRAPH_VERBOSE, ARM_COMPUTE_RETURN_ERROR_ON, get_backing_tensor_info(), INode::id(), INode::input(), arm_compute::test::validation::input, INode::name(), INode::num_inputs(), INode::num_outputs(), INode::output(), PermuteLayerNode::permutation_vector(), and arm_compute::experimental::dynamic_fusion::validate().

465 {
466  ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating PermuteLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
467  ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 1);
468  ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);
469 
470  // Extract IO and info
472  arm_compute::ITensorInfo *output = get_backing_tensor_info(node.output(0));
473  const PermutationVector &perm = node.permutation_vector();
474 
475  return PermuteLayer::validate(input, output, perm);
476 }
Status validate(const OperatorGraph &op_graph)
Return the validity of op_graph, usually after performing an operation (e.g.
arm_compute::ITensorInfo * get_backing_tensor_info(arm_compute::graph::Tensor *tensor)
Returns backing tensor info of a given tensor.
Strides PermutationVector
Permutation vector.
Definition: Types.h:51
Store the tensor's metadata.
Definition: ITensorInfo.h:40
#define ARM_COMPUTE_RETURN_ERROR_ON(cond)
If the condition is true, an error is returned.
Definition: Error.h:296
#define ARM_COMPUTE_LOG_GRAPH_VERBOSE(x)
Definition: Logger.h:50

◆ validate_prelu_layer()

Status arm_compute::graph::backends::detail::validate_prelu_layer ( PReluLayerNode node)

Validates a PRelu layer node.

Template Parameters
PReluLayer  PRelu layer type
Parameters
[in]  node  Node to validate
Returns
Status

Definition at line 487 of file ValidateHelpers.h.

References ARM_COMPUTE_LOG_GRAPH_VERBOSE, ARM_COMPUTE_RETURN_ERROR_ON, get_backing_tensor_info(), INode::id(), INode::input(), arm_compute::test::validation::input, INode::name(), INode::num_inputs(), INode::num_outputs(), INode::output(), and arm_compute::experimental::dynamic_fusion::validate().

488 {
489  ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating PRelu node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
490  ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 2);
491  ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);
492 
493  // Extract IO and info
495  arm_compute::ITensorInfo *alpha = get_backing_tensor_info(node.input(1));
496  arm_compute::ITensorInfo *output = get_backing_tensor_info(node.output(0));
497 
498  return PReluLayer::validate(input, alpha, output);
499 }
Status validate(const OperatorGraph &op_graph)
Return the validity of op_graph, usually after performing an operation (e.g.
arm_compute::ITensorInfo * get_backing_tensor_info(arm_compute::graph::Tensor *tensor)
Returns backing tensor info of a given tensor.
Store the tensor's metadata.
Definition: ITensorInfo.h:40
#define ARM_COMPUTE_RETURN_ERROR_ON(cond)
If the condition is true, an error is returned.
Definition: Error.h:296
#define ARM_COMPUTE_LOG_GRAPH_VERBOSE(x)
Definition: Logger.h:50

◆ validate_priorbox_layer()

Status arm_compute::graph::backends::detail::validate_priorbox_layer ( PriorBoxLayerNode node)

Validates a priorbox layer node.

Template Parameters
PriorBoxLayer  PriorBox layer type
Parameters
[in]  node  Node to validate
Returns
Status

Definition at line 510 of file ValidateHelpers.h.

References ARM_COMPUTE_LOG_GRAPH_VERBOSE, ARM_COMPUTE_RETURN_ERROR_ON, get_backing_tensor_info(), INode::id(), INode::input(), INode::name(), INode::num_inputs(), INode::num_outputs(), INode::output(), PriorBoxLayerNode::priorbox_info(), and arm_compute::experimental::dynamic_fusion::validate().

511 {
512  ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating PriorBoxLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
513  ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 2);
514  ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);
515 
516  // Extract IO and info
517  arm_compute::ITensorInfo *input0 = get_backing_tensor_info(node.input(0));
518  arm_compute::ITensorInfo *input1 = get_backing_tensor_info(node.input(1));
519  arm_compute::ITensorInfo *output = get_backing_tensor_info(node.output(0));
520  const PriorBoxLayerInfo prior_info = node.priorbox_info();
521 
522  return PriorBoxLayer::validate(input0, input1, output, prior_info);
523 }
Status validate(const OperatorGraph &op_graph)
Return the validity of op_graph, usually after performing an operation (e.g.
arm_compute::ITensorInfo * get_backing_tensor_info(arm_compute::graph::Tensor *tensor)
Returns backing tensor info of a given tensor.
Store the tensor's metadata.
Definition: ITensorInfo.h:40
#define ARM_COMPUTE_RETURN_ERROR_ON(cond)
If the condition is true, an error is returned.
Definition: Error.h:296
#define ARM_COMPUTE_LOG_GRAPH_VERBOSE(x)
Definition: Logger.h:50

◆ validate_quantization_layer()

Status arm_compute::graph::backends::detail::validate_quantization_layer ( QuantizationLayerNode node)

Validates a Quantization layer node.

Template Parameters
QuantizationLayer  Quantization layer type
Parameters
[in]  node  Node to validate
Returns
Status

Definition at line 534 of file ValidateHelpers.h.

References ARM_COMPUTE_LOG_GRAPH_VERBOSE, ARM_COMPUTE_RETURN_ERROR_ON, get_backing_tensor_info(), INode::id(), INode::input(), arm_compute::test::validation::input, INode::name(), INode::num_inputs(), INode::num_outputs(), INode::output(), and arm_compute::experimental::dynamic_fusion::validate().

535 {
536  ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating QuantizationLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
537  ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 1);
538  ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);
539 
540  // Extract input and output
542  arm_compute::ITensorInfo *output = get_backing_tensor_info(node.output(0));
543 
544  // Validate function
545  return QuantizationLayer::validate(input, output);
546 }
Status validate(const OperatorGraph &op_graph)
Return the validity of op_graph, usually after performing an operation (e.g.
arm_compute::ITensorInfo * get_backing_tensor_info(arm_compute::graph::Tensor *tensor)
Returns backing tensor info of a given tensor.
Store the tensor's metadata.
Definition: ITensorInfo.h:40
#define ARM_COMPUTE_RETURN_ERROR_ON(cond)
If the condition is true, an error is returned.
Definition: Error.h:296
#define ARM_COMPUTE_LOG_GRAPH_VERBOSE(x)
Definition: Logger.h:50

◆ validate_reduction_operation_layer()

Status arm_compute::graph::backends::detail::validate_reduction_operation_layer ( ReductionLayerNode node)

Validates a Reduction operation layer node.

Template Parameters
ReductionLayer  Reduction layer type
Parameters
[in]  node  Node to validate
Returns
Status

Definition at line 557 of file ValidateHelpers.h.

References ARM_COMPUTE_LOG_GRAPH_VERBOSE, ARM_COMPUTE_RETURN_ERROR_ON, ReductionLayerNode::axis(), get_backing_tensor_info(), INode::id(), INode::input(), arm_compute::test::validation::input, ReductionLayerNode::keep_dims(), INode::name(), INode::num_inputs(), INode::num_outputs(), ReductionLayerNode::op(), INode::output(), and arm_compute::experimental::dynamic_fusion::validate().

558 {
559  ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating ReductionLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
560 
561  ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 1);
562  ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);
563 
564  // Extract input and output
566  arm_compute::ITensorInfo *output = get_backing_tensor_info(node.output(0));
567 
568  // Validate function
569  return ReductionLayer::validate(input, output, node.axis(), node.op(), node.keep_dims());
570 }
Status validate(const OperatorGraph &op_graph)
Return the validity of op_graph, usually after performing an operation (e.g.
arm_compute::ITensorInfo * get_backing_tensor_info(arm_compute::graph::Tensor *tensor)
Returns backing tensor info of a given tensor.
Store the tensor's metadata.
Definition: ITensorInfo.h:40
#define ARM_COMPUTE_RETURN_ERROR_ON(cond)
If the condition is true, an error is returned.
Definition: Error.h:296
#define ARM_COMPUTE_LOG_GRAPH_VERBOSE(x)
Definition: Logger.h:50

◆ validate_reorg_layer()

Status arm_compute::graph::backends::detail::validate_reorg_layer ( ReorgLayerNode node)

Validates a Reorg layer node.

Template Parameters
ReorgLayer  Reorg layer type
Parameters
[in]  node  Node to validate
Returns
Status

Definition at line 581 of file ValidateHelpers.h.

References ARM_COMPUTE_LOG_GRAPH_VERBOSE, ARM_COMPUTE_RETURN_ERROR_ON, get_backing_tensor_info(), INode::id(), INode::input(), arm_compute::test::validation::input, INode::name(), INode::num_inputs(), INode::num_outputs(), INode::output(), ReorgLayerNode::stride(), and arm_compute::experimental::dynamic_fusion::validate().

582 {
583  ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating ReorgLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
584  ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 1);
585  ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);
586 
587  // Extract input and output
589  arm_compute::ITensorInfo *output = get_backing_tensor_info(node.output(0));
590 
591  // Validate function
592  return ReorgLayer::validate(input, output, node.stride());
593 }
Status validate(const OperatorGraph &op_graph)
Return the validity of op_graph, usually after performing an operation (e.g.
arm_compute::ITensorInfo * get_backing_tensor_info(arm_compute::graph::Tensor *tensor)
Returns backing tensor info of a given tensor.
Store the tensor's metadata.
Definition: ITensorInfo.h:40
#define ARM_COMPUTE_RETURN_ERROR_ON(cond)
If the condition is true, an error is returned.
Definition: Error.h:296
#define ARM_COMPUTE_LOG_GRAPH_VERBOSE(x)
Definition: Logger.h:50

◆ validate_reshape_layer()

Status arm_compute::graph::backends::detail::validate_reshape_layer ( ReshapeLayerNode node)

Validates a Reshape layer node.

Template Parameters
ReshapeLayer  Reshape layer type
Parameters
[in]  node  Node to validate
Returns
Status

Definition at line 604 of file ValidateHelpers.h.

References ARM_COMPUTE_LOG_GRAPH_VERBOSE, ARM_COMPUTE_RETURN_ERROR_ON, get_backing_tensor_info(), INode::id(), INode::input(), arm_compute::test::validation::input, INode::name(), INode::num_inputs(), INode::num_outputs(), INode::output(), and arm_compute::experimental::dynamic_fusion::validate().

605 {
606  ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating ReshapeLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
607  ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 1);
608  ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);
609 
610  // Extract input and output
613 
614  // Validate function
615  return ReshapeLayer::validate(input, output);
616 }
Status validate(const OperatorGraph &op_graph)
Return the validity of op_graph, usually after performing an operation (e.g.
arm_compute::ITensorInfo * get_backing_tensor_info(arm_compute::graph::Tensor *tensor)
Returns backing tensor info of a given tensor.
Store the tensor's metadata.
Definition: ITensorInfo.h:40
#define ARM_COMPUTE_RETURN_ERROR_ON(cond)
If the condition is true, an error is returned.
Definition: Error.h:296
#define ARM_COMPUTE_LOG_GRAPH_VERBOSE(x)
Definition: Logger.h:50

◆ validate_roi_align_layer()

Status arm_compute::graph::backends::detail::validate_roi_align_layer ( ROIAlignLayerNode node)

Validates a ROI Align layer node.

Template Parameters
ROIAlignLayer  ROIAlign layer type
Parameters
[in]  node  Node to validate
Returns
Status

Definition at line 627 of file ValidateHelpers.h.

References ARM_COMPUTE_LOG_GRAPH_VERBOSE, ARM_COMPUTE_RETURN_ERROR_ON, get_backing_tensor_info(), INode::id(), INode::input(), arm_compute::test::validation::input, INode::name(), INode::num_inputs(), INode::num_outputs(), INode::output(), ROIAlignLayerNode::pooling_info(), and arm_compute::experimental::dynamic_fusion::validate().

628 {
629  ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating ROIAlignLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
630  ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 2);
631  ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);
632 
633  // Extract input and output
637  const ROIPoolingLayerInfo &pool_info = node.pooling_info();
638 
639  // Validate function
640  return ROIAlignLayer::validate(input, rois, output, pool_info);
641 }
Status validate(const OperatorGraph &op_graph)
Return the validity of op_graph, usually after performing an operation (e.g.
arm_compute::ITensorInfo * get_backing_tensor_info(arm_compute::graph::Tensor *tensor)
Returns backing tensor info of a given tensor.
Store the tensor's metadata.
Definition: ITensorInfo.h:40
#define ARM_COMPUTE_RETURN_ERROR_ON(cond)
If the condition is true, an error is returned.
Definition: Error.h:296
#define ARM_COMPUTE_LOG_GRAPH_VERBOSE(x)
Definition: Logger.h:50

◆ validate_slice_layer()

Status arm_compute::graph::backends::detail::validate_slice_layer ( SliceLayerNode node)

Validates a Slice layer node.

Template Parameters
SliceLayer  Slice layer function type
Parameters
[in]  node  Node to validate
Returns
Status

Definition at line 652 of file ValidateHelpers.h.

References ARM_COMPUTE_LOG_GRAPH_VERBOSE, ARM_COMPUTE_RETURN_ERROR_ON, SliceLayerNode::ends(), get_backing_tensor_info(), INode::id(), INode::input(), arm_compute::test::validation::input, INode::name(), INode::num_inputs(), INode::num_outputs(), INode::output(), SliceLayerNode::starts(), and arm_compute::experimental::dynamic_fusion::validate().

653 {
654  ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating Slice node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
655  ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 1);
656  ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);
657 
658  // Extract IO and info
660  arm_compute::ITensorInfo *output = get_backing_tensor_info(node.output(0));
661  const Coordinates starts = node.starts();
662  const Coordinates ends = node.ends();
663 
664  return SliceLayer::validate(input, output, starts, ends);
665 }
Status validate(const OperatorGraph &op_graph)
Return the validity of op_graph, usually after performing an operation (e.g.
arm_compute::ITensorInfo * get_backing_tensor_info(arm_compute::graph::Tensor *tensor)
Returns backing tensor info of a given tensor.
Store the tensor's metadata.
Definition: ITensorInfo.h:40
#define ARM_COMPUTE_RETURN_ERROR_ON(cond)
If the condition is true, an error is returned.
Definition: Error.h:296
#define ARM_COMPUTE_LOG_GRAPH_VERBOSE(x)
Definition: Logger.h:50

◆ validate_strided_slice_layer()

Status arm_compute::graph::backends::detail::validate_strided_slice_layer ( StridedSliceLayerNode node)

Validates a Strided Slice layer node.

Template Parameters
StridedSliceLayer  Strided Slice layer function type
Parameters
[in]  node  Node to validate
Returns
Status

Definition at line 676 of file ValidateHelpers.h.

References ARM_COMPUTE_LOG_GRAPH_VERBOSE, ARM_COMPUTE_RETURN_ERROR_ON, StridedSliceLayerInfo::begin_mask(), StridedSliceLayerInfo::end_mask(), StridedSliceLayerNode::ends(), get_backing_tensor_info(), INode::id(), arm_compute::test::validation::info, INode::input(), arm_compute::test::validation::input, INode::name(), INode::num_inputs(), INode::num_outputs(), INode::output(), StridedSliceLayerInfo::shrink_axis_mask(), StridedSliceLayerNode::starts(), StridedSliceLayerNode::strided_slice_info(), StridedSliceLayerNode::strides(), and arm_compute::experimental::dynamic_fusion::validate().

677 {
678  ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating StridedSlice node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
679  ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 1);
680  ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);
681 
682  // Extract IO and info
684  arm_compute::ITensorInfo *output = get_backing_tensor_info(node.output(0));
685  const Coordinates starts = node.starts();
686  const Coordinates ends = node.ends();
687  const BiStrides strides = node.strides();
688  const StridedSliceLayerInfo info = node.strided_slice_info();
689 
690  return StridedSliceLayer::validate(input, output, starts, ends, strides, info.begin_mask(), info.end_mask(), info.shrink_axis_mask());
691 }
Status validate(const OperatorGraph &op_graph)
Return the validity of op_graph, usually after performing an operation (e.g.
arm_compute::ITensorInfo * get_backing_tensor_info(arm_compute::graph::Tensor *tensor)
Returns backing tensor info of a given tensor.
Coordinates BiStrides
Bidirectional strides.
Definition: Types.h:53
Store the tensor's metadata.
Definition: ITensorInfo.h:40
#define ARM_COMPUTE_RETURN_ERROR_ON(cond)
If the condition is true, an error is returned.
Definition: Error.h:296
ScaleKernelInfo info(interpolation_policy, default_border_mode, PixelValue(), sampling_policy, false)
#define ARM_COMPUTE_LOG_GRAPH_VERBOSE(x)
Definition: Logger.h:50

◆ validate_unary_eltwise_layer()

Status arm_compute::graph::backends::detail::validate_unary_eltwise_layer ( UnaryEltwiseLayerNode node)

Validates a unary element-wise layer node.

Parameters
[in]  node  Node to validate
Returns
Status

Definition at line 750 of file ValidateHelpers.h.

References ARM_COMPUTE_ERROR, ARM_COMPUTE_LOG_GRAPH_VERBOSE, ARM_COMPUTE_RETURN_ERROR_ON, UnaryEltwiseLayerNode::eltwise_descriptor(), arm_compute::graph::Exp, get_backing_tensor_info(), INode::id(), INode::input(), arm_compute::test::validation::input, INode::name(), INode::num_inputs(), INode::num_outputs(), UnaryEltwiseLayerDescriptor::op, INode::output(), and arm_compute::experimental::dynamic_fusion::validate().

751 {
752  ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating EltwiseLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
753  ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 1);
754  ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);
755 
756  // Extract input and output
758  arm_compute::ITensorInfo *output = get_backing_tensor_info(node.output(0));
759  const UnaryEltwiseOperation eltwise_op = node.eltwise_descriptor().op;
760 
761  // Validate function
762  if(eltwise_op == UnaryEltwiseOperation::Exp)
763  {
765  }
766  else
767  {
768  ARM_COMPUTE_ERROR("Unsupported unary element-wise operation!");
769  }
770 
771  return Status{};
772 }
Status validate(const OperatorGraph &op_graph)
Return the validity of op_graph, usually after performing an operation (e.g.
arm_compute::ITensorInfo * get_backing_tensor_info(arm_compute::graph::Tensor *tensor)
Returns backing tensor info of a given tensor.
#define ARM_COMPUTE_ERROR(msg)
Print the given message then throw an std::runtime_error.
Definition: Error.h:352
Store the tensor's metadata.
Definition: ITensorInfo.h:40
#define ARM_COMPUTE_RETURN_ERROR_ON(cond)
If the condition is true, an error is returned.
Definition: Error.h:296
#define ARM_COMPUTE_LOG_GRAPH_VERBOSE(x)
Definition: Logger.h:50
UnaryEltwiseOperation
Supported Unary Element-wise operations.
Definition: Types.h:120