24 #ifndef ARM_COMPUTE_GRAPH_BACKENDS_DETAIL_FUNCTION_HELPERS_H 25 #define ARM_COMPUTE_GRAPH_BACKENDS_DETAIL_FUNCTION_HELPERS_H 58 template <
typename TargetInfo>
68 backing_tensor = (tensor_handle !=
nullptr) ? arm_compute::utils::cast::polymorphic_cast<typename TargetInfo::TensorType *>(&tensor_handle->tensor()) :
nullptr;
71 return backing_tensor;
74 template <
typename TargetInfo>
78 <<
" Target: " << TargetInfo::TargetType
79 <<
" ID: " << node.
id()
98 template <
typename ActivationLayerFunction,
typename TargetInfo>
101 validate_node<TargetInfo>(node, 1 , 1 );
109 auto func = std::make_unique<ActivationLayerFunction>();
110 func->configure(input, output, act_info);
114 <<
" Type: " << node.type()
115 <<
" Target: " << TargetInfo::TargetType
116 <<
" Data Type: " << input->info()->data_type()
117 <<
" Shape: " << input->info()->tensor_shape()
118 <<
" Activation function: " << act_info.activation()
119 <<
" a: " << act_info.a()
120 <<
" b: " << act_info.b()
124 return std::move(
func);
136 template <
typename ArgMinMaxLayerFunction,
typename TargetInfo>
139 validate_node<TargetInfo>(node, 1 , 1 );
145 unsigned int axis = node.axis();
148 auto func = std::make_unique<ArgMinMaxLayerFunction>();
149 func->configure(input, axis, output, op);
153 <<
" Type: " << node.type()
154 <<
" Target: " << TargetInfo::TargetType
155 <<
" Data Type: " << input->info()->data_type()
156 <<
" Shape: " << input->info()->tensor_shape()
157 <<
" Reduction Operation: " << op
161 return std::move(
func);
173 template <
typename BatchNormalizationLayerFunction,
typename TargetInfo>
176 validate_node<TargetInfo>(node, 5 , 1 );
186 const float epsilon = node.epsilon();
190 auto func = std::make_unique<BatchNormalizationLayerFunction>();
191 func->configure(input, output, mean, var, beta, gamma, epsilon, fused_act);
196 <<
" Type: " << node.type()
197 <<
" Target: " << TargetInfo::TargetType
198 <<
" Data Type: " << input->info()->data_type()
199 <<
" Shape: " << input->info()->tensor_shape()
200 <<
" Epsilon: " << epsilon <<
" " 205 return std::move(
func);
218 template <
typename FusedLayerTypes,
typename TargetInfo>
221 validate_node<TargetInfo>(node, 7 , 1 );
235 const unsigned int num_groups = node.num_groups();
238 const float epsilon = node.epsilon();
242 std::unique_ptr<IFunction>
func;
243 std::string func_name;
248 std::tie(func, func_name) = create_named_memory_managed_function<FType>(
249 std::string(
"FusedConvolutionBatchNormalizationLayer"), mm,
input, weights, biases, output, mean, var, beta, gamma,
epsilon,
conv_info,
num_groups, fast_math, fused_act);
254 <<
" Type: " << node.type()
255 <<
" Target: " << TargetInfo::TargetType
256 <<
" Data Type: " << input->info()->data_type()
257 <<
" Input shape: " << input->info()->tensor_shape()
258 <<
" Weights shape: " << weights->info()->tensor_shape()
259 <<
" Output shape: " << output->info()->tensor_shape()
262 return std::move(func);
275 template <
typename FusedLayerTypes,
typename TargetInfo>
278 validate_node<TargetInfo>(node, 7 , 1 );
292 const unsigned int depth_multiplier = node.depth_multiplier();
294 const float epsilon = node.epsilon();
298 std::unique_ptr<IFunction>
func;
299 std::string func_name;
304 std::tie(func, func_name) = create_named_memory_managed_function<FType>(
305 std::string(
"FusedDepthwiseConvolutionBatchNormalizationLayer"), mm,
input, weights, biases, output, mean, var, beta, gamma,
epsilon,
conv_info, depth_multiplier, fused_act);
310 <<
" Type: " << node.type()
311 <<
" Target: " << TargetInfo::TargetType
312 <<
" Data Type: " << input->info()->data_type()
313 <<
" Input shape: " << input->info()->tensor_shape()
314 <<
" Weights shape: " << weights->info()->tensor_shape()
315 <<
" Output shape: " << output->info()->tensor_shape()
318 return std::move(func);
330 template <
typename BoundingBoxTransformLayerFunction,
typename TargetInfo>
333 validate_node<TargetInfo>(node, 2 , 1 );
342 auto func = std::make_unique<BoundingBoxTransformLayerFunction>();
343 func->configure(input, output, deltas, bbox_info);
348 <<
" Type: " << node.type()
349 <<
" Target: " << TargetInfo::TargetType
350 <<
" Data Type: " << input->info()->data_type()
351 <<
" Shape: " << input->info()->tensor_shape()
352 <<
" BoundingBox Info img W: " << bbox_info.img_width() <<
" " 353 <<
" BoundingBox Info img H: " << bbox_info.img_height() <<
" " 356 return std::move(
func);
368 template <
typename ChannelShuffleLayerFunction,
typename TargetInfo>
371 validate_node<TargetInfo>(node, 1 , 1 );
376 const unsigned int num_groups = node.num_groups();
379 auto func = std::make_unique<ChannelShuffleLayerFunction>();
380 func->configure(input, output, num_groups);
384 <<
" Type: " << node.type()
385 <<
" Target: " << TargetInfo::TargetType
386 <<
" Data Type: " << input->info()->data_type()
387 <<
" Shape: " << input->info()->tensor_shape()
388 <<
" Num groups: " << num_groups
391 return std::move(
func);
403 template <
typename ConcatenateLayerFunction,
typename TargetInfo>
416 std::vector<typename TargetInfo::SrcTensorType *> inputs;
417 for(
unsigned int i = 0; i < node.
num_inputs(); ++i)
419 inputs.push_back(get_backing_tensor<TargetInfo>(node.
input(i)));
426 auto func = std::make_unique<ConcatenateLayerFunction>();
427 func->configure(inputs, output, concat_axis);
431 std::ostringstream qss;
434 qss <<
" Output QuantInfo: " << output->info()->quantization_info();
438 <<
" Type: " << node.
type()
439 <<
" Target: " << TargetInfo::TargetType
440 <<
" Data Type: " << output->info()->data_type()
441 <<
" Shape: " << output->info()->tensor_shape()
442 <<
" Num Inputs: " << inputs.size()
443 <<
" Axis: " << concat_axis
447 return std::move(func);
460 template <
typename ConvolutionLayerFunctions,
typename TargetInfo>
463 validate_node<TargetInfo>(node, 3 , 1 );
479 const unsigned int num_groups = node.num_groups();
486 std::unique_ptr<IFunction>
func;
487 std::string func_name;
492 std::tie(func, func_name) = create_named_memory_managed_function<typename ConvolutionLayerFunctions::WinogradConvolutionLayer>(
493 std::string(
"WinogradConvolutionLayer"), mm,
499 std::tie(func, func_name) = create_named_function<typename ConvolutionLayerFunctions::DirectConvolutionLayer>(
500 std::string(
"DirectConvolutionLayer"),
505 std::tie(func, func_name) = create_named_memory_managed_function<typename ConvolutionLayerFunctions::GEMMConvolutionLayer>(
506 std::string(
"GEMMConvolutionLayer"), mm,
512 std::tie(func, func_name) = create_named_memory_managed_function<typename ConvolutionLayerFunctions::GenericConvolutionLayer>(
513 std::string(
"GenericConvolutionLayer"), mm,
519 std::ostringstream qss;
522 qss <<
" Input QuantInfo: " << input->info()->quantization_info()
523 <<
" Weights QuantInfo: " << weights->info()->quantization_info()
524 <<
" Output QuantInfo: " << output->info()->quantization_info();
528 <<
" Type: " << func_name
529 <<
" Target: " << TargetInfo::TargetType
530 <<
" Data Type: " << input->info()->data_type()
531 <<
" Groups: " << num_groups
532 <<
" Input shape: " << input->info()->tensor_shape()
533 <<
" Weights shape: " << weights->info()->tensor_shape()
534 <<
" Output shape: " << output->info()->tensor_shape()
538 return std::move(func);
551 template <
typename DeconvolutionLayerFunction,
typename TargetInfo>
554 validate_node<TargetInfo>(node, 3 , 1 );
566 std::unique_ptr<IFunction>
func;
568 std::tie(func, std::ignore) = create_named_memory_managed_function<DeconvolutionLayerFunction>(
570 input, weights, biases, output, deconv_info);
575 <<
" Type: " << node.type()
576 <<
" Target: " << TargetInfo::TargetType
577 <<
" Data Type: " << input->info()->data_type()
578 <<
" Input shape: " << input->info()->tensor_shape()
579 <<
" Weights shape: " << weights->info()->tensor_shape()
580 <<
" Output shape: " << output->info()->tensor_shape()
594 template <
typename DepthwiseConvolutionLayer,
typename TargetInfo>
597 validate_node<TargetInfo>(node, 3 , 1 );
613 const unsigned int depth_multiplier = node.depth_multiplier();
617 std::unique_ptr<IFunction>
func;
618 std::string func_name;
620 std::tie(func, func_name) = create_named_function<DepthwiseConvolutionLayer>(
621 std::string(
"DepthwiseConvolutionLayer"),
622 input, weights, biases, output,
conv_info, depth_multiplier, fused_act);
625 std::ostringstream qss;
628 qss <<
" Input QuantInfo: " << input->info()->quantization_info()
629 <<
" Weights QuantInfo: " << weights->info()->quantization_info()
630 <<
" Output QuantInfo: " << output->info()->quantization_info();
634 <<
" Type: " << func_name
635 <<
" Target: " << TargetInfo::TargetType
636 <<
" Data Type: " << input->info()->data_type()
637 <<
" Input shape: " << input->info()->tensor_shape()
638 <<
" Weights shape: " << weights->info()->tensor_shape()
639 <<
" Output shape: " << output->info()->tensor_shape()
640 <<
" Depth multiplier: " << depth_multiplier
644 return std::move(func);
656 template <
typename DepthToSpaceLayerFunction,
typename TargetInfo>
659 validate_node<TargetInfo>(node, 1 , 1 );
669 auto func = std::make_unique<DepthToSpaceLayerFunction>();
670 func->configure(input, output, node.block_shape());
675 <<
" Type: " << node.type()
676 <<
" Target: " << TargetInfo::TargetType
677 <<
" Data Type: " << input->info()->data_type()
678 <<
" Input shape: " << input->info()->tensor_shape()
679 <<
" Block Size: " << node.block_shape()
680 <<
" Output shape: " << output->info()->tensor_shape()
683 return std::move(
func);
695 template <
typename DequantizationLayerFunction,
typename TargetInfo>
698 validate_node<TargetInfo>(node, 1 , 1 );
708 auto func = std::make_unique<DequantizationLayerFunction>();
709 func->configure(input, output);
714 <<
" Type: " << node.type()
715 <<
" Target: " << TargetInfo::TargetType
716 <<
" Data Type: " << input->info()->data_type()
717 <<
" Input shape: " << input->info()->tensor_shape()
718 <<
" Input quantization info: " << output->info()->quantization_info()
719 <<
" Output shape: " << output->info()->tensor_shape()
722 return std::move(
func);
733 template <
typename DetectionOutputLayerFunction,
typename TargetInfo>
736 validate_node<TargetInfo>(node, 3 , 1 );
751 auto func = std::make_unique<DetectionOutputLayerFunction>();
752 func->configure(input0, input1, input2, output, detect_info);
757 <<
" Type: " << node.type()
758 <<
" Target: " << TargetInfo::TargetType
759 <<
" Data Type: " << input0->info()->data_type()
760 <<
" Input0 shape: " << input0->info()->tensor_shape()
761 <<
" Input1 shape: " << input1->info()->tensor_shape()
762 <<
" Input2 shape: " << input2->info()->tensor_shape()
763 <<
" Output shape: " << output->info()->tensor_shape()
764 <<
" DetectionOutputLayer info: " << detect_info
767 return std::move(
func);
779 template <
typename DetectionPostProcessLayerFunction,
typename TargetInfo>
782 validate_node<TargetInfo>(node, 3 , 4 );
803 auto func = std::make_unique<DetectionPostProcessLayerFunction>();
804 func->configure(input0, input1, input2, output0, output1, output2, output3, detect_info);
809 <<
" Type: " << node.type()
810 <<
" Target: " << TargetInfo::TargetType
811 <<
" Data Type: " << input0->info()->data_type()
812 <<
" Input0 shape: " << input0->info()->tensor_shape()
813 <<
" Input1 shape: " << input1->info()->tensor_shape()
814 <<
" Input2 shape: " << input2->info()->tensor_shape()
815 <<
" Output0 shape: " << output0->info()->tensor_shape()
816 <<
" Output1 shape: " << output1->info()->tensor_shape()
817 <<
" Output2 shape: " << output2->info()->tensor_shape()
818 <<
" Output3 shape: " << output3->info()->tensor_shape()
819 <<
" DetectionPostProcessLayer info: " << detect_info
822 return std::move(
func);
834 template <
typename EltwiseFunctions,
typename TargetInfo>
837 validate_node<TargetInfo>(node, 2 , 1 );
850 std::unique_ptr<IFunction>
func =
nullptr;
851 std::string func_name;
854 std::tie(func, func_name) = create_named_function<typename EltwiseFunctions::Addition>(
855 std::string(
"ArithmeticAddition"),
856 input1, input2, output, convert_policy, act_info);
860 std::tie(func, func_name) = create_named_function<typename EltwiseFunctions::Subtraction>(
861 std::string(
"ArithmeticSubtraction"),
862 input1, input2, output, convert_policy, act_info);
866 std::tie(func, func_name) = create_named_function<typename EltwiseFunctions::Multiplication>(
867 std::string(
"PixelWiseMultiplication"),
868 input1, input2, output, 1.f, convert_policy, node.rounding_policy(), act_info);
872 std::tie(func, func_name) = create_named_function<typename EltwiseFunctions::Maximum>(
873 std::string(
"ElementwiseMaximum"),
874 input1, input2, output, act_info);
884 <<
" Type: " << node.type()
885 <<
" Target: " << TargetInfo::TargetType
886 <<
" Operation: " << func_name
887 <<
" Data Type: " << input1->info()->data_type()
888 <<
" Shape: " << input1->info()->tensor_shape()
891 return std::move(func);
903 template <
typename UnaryEltwiseFunctions,
typename TargetInfo>
906 validate_node<TargetInfo>(node, 1 , 1 );
916 std::unique_ptr<IFunction>
func =
nullptr;
917 std::string func_name;
920 std::tie(func, func_name) = create_named_function<typename UnaryEltwiseFunctions::Exp>(
932 <<
" Type: " << node.type()
933 <<
" Target: " << TargetInfo::TargetType
934 <<
" Operation: " << func_name
935 <<
" Data Type: " << input->info()->data_type()
936 <<
" Shape: " << input->info()->tensor_shape()
939 return std::move(func);
951 template <
typename FlattenLayerFunction,
typename TargetInfo>
954 validate_node<TargetInfo>(node, 1 , 1 );
964 auto func = std::make_unique<FlattenLayerFunction>();
965 func->configure(input, output);
970 <<
" Type: " << node.type()
971 <<
" Target: " << TargetInfo::TargetType
972 <<
" Data Type: " << input->info()->data_type()
973 <<
" Input shape: " << input->info()->tensor_shape()
974 <<
" Output shape: " << output->info()->tensor_shape()
977 return std::move(
func);
990 template <
typename FullyConnectedLayerFunction,
typename TargetInfo>
993 validate_node<TargetInfo>(node, 3 , 1 );
1009 auto func = std::make_unique<FullyConnectedLayerFunction>(mm, wm.get());
1010 func->configure(input, weights, biases, output, fc_info);
1015 std::ostringstream qss;
1018 qss <<
" Input QuantInfo: " << input->info()->quantization_info()
1019 <<
" Weights QuantInfo: " << weights->info()->quantization_info()
1020 <<
" Output QuantInfo: " << output->info()->quantization_info();
1024 <<
" Type: " << node.type()
1025 <<
" Target: " << TargetInfo::TargetType
1026 <<
" Data Type: " << input->info()->data_type()
1028 <<
" Input shape: " << input->info()->tensor_shape()
1029 <<
" Weights shape: " << weights->info()->tensor_shape()
1030 <<
" Output shape: " << output->info()->tensor_shape()
1033 return std::move(
func);
1046 template <
typename GenerateProposalsLayerFunction,
typename TargetInfo>
1049 validate_node<TargetInfo>(node, 3 , 3 );
1067 auto func = std::make_unique<GenerateProposalsLayerFunction>(
get_memory_manager(ctx, TargetInfo::TargetType));
1068 func->configure(scores, deltas, anchors, proposals, scores_out, num_valid_proposals, info);
1072 <<
" Target " << TargetInfo::TargetType
1073 <<
" Data Type: " << scores->info()->data_type()
1074 <<
" Scores shape: " << scores->info()->tensor_shape()
1075 <<
" Deltas shape: " << deltas->info()->tensor_shape()
1076 <<
" Anchors shape: " << anchors->info()->tensor_shape()
1077 <<
" Proposals shape: " << proposals->info()->tensor_shape()
1078 <<
" Num valid proposals shape: " << num_valid_proposals->info()->tensor_shape()
1079 <<
" Scores Out shape: " << scores_out->info()->tensor_shape()
1082 return std::move(
func);
1095 template <
typename L2NormalizeLayerFunction,
typename TargetInfo>
1098 validate_node<TargetInfo>(node, 1 , 1 );
1103 int axis = node.axis();
1104 float epsilon = node.epsilon();
1111 auto func = std::make_unique<L2NormalizeLayerFunction>(mm);
1112 func->configure(input, output, axis, epsilon);
1117 <<
" Type: " << node.type()
1118 <<
" Target: " << TargetInfo::TargetType
1119 <<
" Data Type: " << input->info()->data_type()
1120 <<
" Input shape: " << input->info()->tensor_shape()
1121 <<
" Output shape: " << output->info()->tensor_shape()
1122 <<
" Axis: " << axis
1123 <<
" Epsilon: " << epsilon
1126 return std::move(
func);
1139 template <
typename NormalizationLayerFunction,
typename TargetInfo>
1144 validate_node<TargetInfo>(node, 1 , 1 );
1154 auto func = std::make_unique<NormalizationLayerFunction>();
1155 func->configure(input, output, norm_info);
1160 <<
" Type: " << node.type()
1161 <<
" Target: " << TargetInfo::TargetType
1162 <<
" Data Type: " << input->info()->data_type()
1163 <<
" Input shape: " << input->info()->tensor_shape()
1164 <<
" Output shape: " << output->info()->tensor_shape()
1165 <<
" Normalization info: " << norm_info.type()
1168 return std::move(
func);
1180 template <
typename NormalizePlanarYUVLayerFunction,
typename TargetInfo>
1183 validate_node<TargetInfo>(node, 3 , 1 );
1196 auto func = std::make_unique<NormalizePlanarYUVLayerFunction>();
1197 func->configure(input, output, mean, std);
1202 <<
" Type: " << node.type()
1203 <<
" Target: " << TargetInfo::TargetType
1204 <<
" Data Type: " << input->info()->data_type()
1205 <<
" Shape: " << input->info()->tensor_shape()
1208 return std::move(
func);
1220 template <
typename PadLayerFunction,
typename TargetInfo>
1223 validate_node<TargetInfo>(node, 1 , 1 );
1229 const PixelValue pad_value = node.pad_value();
1234 auto func = std::make_unique<PadLayerFunction>();
1235 func->configure(input, output, padding, pad_value);
1240 <<
" Type: " << node.type()
1241 <<
" Target: " << TargetInfo::TargetType
1242 <<
" Data Type: " << input->info()->data_type()
1243 <<
" Input shape: " << input->info()->tensor_shape()
1244 <<
" Output shape: " << output->info()->tensor_shape()
1247 return std::move(
func);
1259 template <
typename PermuteLayerFunction,
typename TargetInfo>
1262 validate_node<TargetInfo>(node, 1 , 1 );
1272 auto func = std::make_unique<PermuteLayerFunction>();
1273 func->configure(input, output, perm);
1278 <<
" Type: " << node.type()
1279 <<
" Target: " << TargetInfo::TargetType
1280 <<
" Data Type: " << input->info()->data_type()
1281 <<
" Input shape: " << input->info()->tensor_shape()
1282 <<
" Output shape: " << output->info()->tensor_shape()
1283 <<
" Permutation vector: " << perm
1286 return std::move(
func);
1298 template <
typename PoolingLayerFunction,
typename TargetInfo>
1301 validate_node<TargetInfo>(node, 1 , 1 );
1311 auto func = std::make_unique<PoolingLayerFunction>();
1312 func->configure(input, output, pool_info);
1317 <<
" Type: " << node.type()
1318 <<
" Target: " << TargetInfo::TargetType
1319 <<
" Data Type: " << input->info()->data_type()
1320 <<
" Input shape: " << input->info()->tensor_shape()
1321 <<
" Output shape: " << output->info()->tensor_shape()
1322 <<
" Pooling info: " << pool_info.pool_type
1325 return std::move(
func);
1337 template <
typename PReluFunction,
typename TargetInfo>
1340 validate_node<TargetInfo>(node, 2 , 1 );
1350 auto func = std::make_unique<PReluFunction>();
1351 func->configure(input, alpha, output);
1356 <<
" Type: " << node.type()
1357 <<
" Target: " << TargetInfo::TargetType
1358 <<
" Data Type: " << input->info()->data_type()
1359 <<
" Input shape: " << input->info()->tensor_shape()
1360 <<
" Output shape: " << output->info()->tensor_shape()
1363 return std::move(
func);
1374 template <
typename TargetInfo>
1377 validate_node<TargetInfo>(node, 1 , 1 );
1386 <<
" Type: " << node.type()
1387 <<
" Target: " << TargetInfo::TargetType
1388 <<
" Data Type: " << input->info()->data_type()
1389 <<
" Input shape: " << input->info()->tensor_shape()
1404 template <
typename PriorBoxLayerFunction,
typename TargetInfo>
1407 validate_node<TargetInfo>(node, 2 , 1 );
1419 auto func = std::make_unique<PriorBoxLayerFunction>();
1420 func->configure(input0, input1, output, prior_info);
1425 <<
" Type: " << node.type()
1426 <<
" Target: " << TargetInfo::TargetType
1427 <<
" Data Type: " << input0->info()->data_type()
1428 <<
" Input0 shape: " << input0->info()->tensor_shape()
1429 <<
" Input1 shape: " << input1->info()->tensor_shape()
1430 <<
" Output shape: " << output->info()->tensor_shape()
1431 <<
" PriorBoxLayer info: " << prior_info
1434 return std::move(
func);
1446 template <
typename QuantizationLayerFunction,
typename TargetInfo>
1449 validate_node<TargetInfo>(node, 1 , 1 );
1458 auto func = std::make_unique<QuantizationLayerFunction>();
1459 func->configure(input, output);
1464 <<
" Type: " << node.type()
1465 <<
" Target: " << TargetInfo::TargetType
1466 <<
" Data Type: " << input->info()->data_type()
1467 <<
" Input shape: " << input->info()->tensor_shape()
1468 <<
" Output shape: " << output->info()->tensor_shape()
1471 return std::move(
func);
1484 template <
typename ReductionOperationFunction,
typename TargetInfo>
1487 validate_node<TargetInfo>(node, 1 , 1 );
1493 int axis = node.axis();
1494 bool keep_dims = node.keep_dims();
1499 auto func = std::make_unique<ReductionOperationFunction>(
get_memory_manager(ctx, TargetInfo::TargetType));
1500 func->configure(input, output, axis, op, keep_dims);
1505 <<
" Type: " << node.type()
1506 <<
" Target: " << TargetInfo::TargetType
1507 <<
" Data Type: " << input->info()->data_type()
1508 <<
" Input shape: " << input->info()->tensor_shape()
1509 <<
" Output shape: " << output->info()->tensor_shape()
1510 <<
" Operation: " << op
1511 <<
" Axis: " << axis
1512 <<
" Keep dimensions:" << keep_dims
1515 return std::move(
func);
1527 template <
typename ReorgLayerFunction,
typename TargetInfo>
1530 validate_node<TargetInfo>(node, 1 , 1 );
1539 auto func = std::make_unique<ReorgLayerFunction>();
1540 func->configure(input, output, node.stride());
1545 <<
" Type: " << node.type()
1546 <<
" Target: " << TargetInfo::TargetType
1547 <<
" Data Type: " << input->info()->data_type()
1548 <<
" Input shape: " << input->info()->tensor_shape()
1549 <<
" Output shape: " << output->info()->tensor_shape()
1552 return std::move(
func);
1564 template <
typename ReshapeLayerFunction,
typename TargetInfo>
1567 validate_node<TargetInfo>(node, 1 , 1 );
1576 auto func = std::make_unique<ReshapeLayerFunction>();
1577 func->configure(input, output);
1582 <<
" Type: " << node.type()
1583 <<
" Target: " << TargetInfo::TargetType
1584 <<
" Data Type: " << input->info()->data_type()
1585 <<
" Input shape: " << input->info()->tensor_shape()
1586 <<
" Output shape: " << output->info()->tensor_shape()
1589 return std::move(
func);
1601 template <
typename ResizeLayerFunction,
typename TargetInfo>
1604 validate_node<TargetInfo>(node, 1 , 1 );
1614 auto func = std::make_unique<ResizeLayerFunction>();
1620 <<
" Type: " << node.type()
1621 <<
" Target: " << TargetInfo::TargetType
1622 <<
" Data Type: " << input->info()->data_type()
1623 <<
" Input shape: " << input->info()->tensor_shape()
1624 <<
" Output shape: " << output->info()->tensor_shape()
1625 <<
" Interpolation: " << policy
1628 return std::move(
func);
1640 template <
typename ROIAlignLayerFunction,
typename TargetInfo>
1643 validate_node<TargetInfo>(node, 2 , 1 );
1656 auto func = std::make_unique<ROIAlignLayerFunction>();
1658 func->configure(input, rois, output, pool_info);
1663 <<
" Type: " << node.type()
1664 <<
" Target: " << TargetInfo::TargetType
1665 <<
" Data Type: " << input->info()->data_type()
1666 <<
" Input shape: " << input->info()->tensor_shape()
1667 <<
" Output shape: " << output->info()->tensor_shape()
1668 <<
" ROIs shape: " << rois->info()->tensor_shape()
1673 return std::move(
func);
1685 template <
typename SliceLayerFunction,
typename TargetInfo>
1688 validate_node<TargetInfo>(node, 1 , 1 );
1697 auto func = std::make_unique<SliceLayerFunction>();
1698 func->configure(input, output, node.starts(), node.ends());
1703 <<
" Type: " << node.type()
1704 <<
" Target: " << TargetInfo::TargetType
1705 <<
" Data Type: " << input->info()->data_type()
1706 <<
" Input shape: " << input->info()->tensor_shape()
1707 <<
" Output shape: " << output->info()->tensor_shape()
1710 return std::move(
func);
1723 template <
typename SoftmaxLayerFunction,
typename TargetInfo>
1726 validate_node<TargetInfo>(node, 1 , 1 );
1731 const float beta = node.beta();
1737 func->configure(input, output, beta);
1742 <<
" Type: " << node.type()
1743 <<
" Target: " << TargetInfo::TargetType
1744 <<
" Data Type: " << input->info()->data_type()
1745 <<
" Input shape: " << input->info()->tensor_shape()
1746 <<
" Output shape: " << output->info()->tensor_shape()
1749 return std::move(
func);
1761 template <
typename StackLayerFunction,
typename TargetInfo>
1768 std::vector<typename TargetInfo::TensorType *> inputs;
1769 for(
unsigned int i = 0; i < node.
num_inputs(); ++i)
1771 inputs.push_back(get_backing_tensor<TargetInfo>(node.
input(i)));
1774 const int axis = node.
axis();
1777 auto func = std::make_unique<StackLayerFunction>();
1778 func->configure(inputs, axis, output);
1783 <<
" Type: " << node.
type()
1784 <<
" Target: " << TargetInfo::TargetType
1785 <<
" Data Type: " << output->info()->data_type()
1786 <<
" Inputs shape: " << inputs[0]->info()->tensor_shape()
1787 <<
" Output shape: " << output->info()->tensor_shape()
1788 <<
" Num Inputs: " << inputs.size()
1789 <<
" Axis: " << axis
1792 return std::move(
func);
1804 template <
typename Str
idedSliceLayerFunction,
typename TargetInfo>
1807 validate_node<TargetInfo>(node, 1 , 1 );
1821 auto func = std::make_unique<StridedSliceLayerFunction>();
1827 <<
" Type: " << node.type()
1828 <<
" Target: " << TargetInfo::TargetType
1829 <<
" Data Type: " << input->info()->data_type()
1830 <<
" Input shape: " << input->info()->tensor_shape()
1831 <<
" Output shape: " << output->info()->tensor_shape()
1834 return std::move(
func);
std::unique_ptr< IFunction > create_normalization_layer(NormalizationLayerNode &node, GraphContext &ctx)
Create a backend normalization layer function.
std::unique_ptr< IFunction > create_dequantization_layer(DequantizationLayerNode &node)
Create a backend dequantize layer function.
std::string name() const
Returns node's name.
Class describing the value of a pixel for any image format.
InterpolationPolicy
Interpolation method.
std::unique_ptr< IFunction > create_arg_min_max_layer(ArgMinMaxLayerNode &node)
Creates a backend argminmax layer function.
Generate Proposals Information class.
EltwiseOperation
Supported Element-wise operations.
std::unique_ptr< IFunction > create_slice_layer(SliceLayerNode &node)
Create a backend slice layer function.
Batch Normalization Layer node.
Fused Depthwise Convolution Batch Normalization node.
Normalization Layer node.
std::unique_ptr< IFunction > create_eltwise_layer(EltwiseLayerNode &node)
Create a backend element-wise operation layer function.
bool enabled() const
Check if initialised.
std::vector< PaddingInfo > PaddingList
List of padding information.
ReductionOperation
Available reduction operations.
DataLayoutDimension concatenation_axis() const
Concatenation axis parameter accessor.
ITensorHandle * handle()
Backend tensor handle accessor.
std::unique_ptr< IFunction > create_batch_normalization_layer(BatchNormalizationLayerNode &node)
Create a backend batch normalization layer function.
std::unique_ptr< IFunction > create_depth_to_space_layer(DepthToSpaceLayerNode &node)
Create a backend depth to space layer function.
#define ARM_COMPUTE_ERROR(msg)
Print the given message then throw an std::runtime_error.
std::unique_ptr< IFunction > create_fused_convolution_batch_normalization_layer(FusedConvolutionBatchNormalizationNode &node, GraphContext &ctx)
Create a backend fused convolution batch normalization layer function.
Target assigned_target() const
Returns assigned target for this node.
std::unique_ptr< IFunction > create_prelu_layer(PReluLayerNode &node)
Create a backend PRelu layer function.
std::unique_ptr< IFunction > create_depthwise_convolution_layer(DepthwiseConvolutionLayerNode &node)
Create a backend layer depth-wise convolution function.
size_t num_outputs() const
Returns number of outputs of the node.
std::unique_ptr< IFunction > create_resize_layer(ResizeLayerNode &node)
Create a backend resize layer function.
Wrapper function to first apply {NE, CL}BatchNormalizationLayer on the weights and then run {NE...
Normalization Layer Information class.
Batch Normalization Layer node.
std::unique_ptr< IFunction > create_permute_layer(PermuteLayerNode &node)
Create a backend permute layer function.
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
std::unique_ptr< IFunction > create_generate_proposals_layer(GenerateProposalsLayerNode &node, GraphContext &ctx)
Create a backend generate proposals layer function.
NodeType type() const override
Returns node's type.
std::unique_ptr< IFunction > create_l2_normalize_layer(L2NormalizeLayerNode &node, GraphContext &ctx)
Create a backend l2 normalization layer function.
const DataLayout data_layout
Fully connected layer info.
Fast math enabled for Convolution layer.
std::unique_ptr< IFunction > create_flatten_layer(FlattenLayerNode &node)
Create a backend flatten layer function.
unsigned int pooled_width() const
Get the pooled width of the layer.
Wrapper function to first apply {NE, CL}BatchNormalizationLayer on the weights and then run {NE...
std::unique_ptr< IFunction > create_pad_layer(PadLayerNode &node)
Create a backend pad layer function.
Activation Layer Information class.
#define ARM_COMPUTE_LOG_GRAPH_INFO(x)
std::unique_ptr< arm_compute::IFunction > create_stack_layer(StackLayerNode &node)
Create a backend layer stack function.
Copyright (c) 2017-2021 Arm Limited.
Batch Normalization node.
Samples are taken at pixel center.
Convolution Layer Weights Information class.
std::unique_ptr< IFunction > create_softmax_layer(SoftmaxLayerNode &node, GraphContext &ctx)
Create a backend softmax layer function.
1 channel, 1 S32 per channel
TensorDescriptor & desc()
TensorInfo metadata accessor.
std::unique_ptr< IFunction > create_fused_depthwise_convolution_batch_normalization_layer(FusedDepthwiseConvolutionBatchNormalizationNode &node, GraphContext &ctx)
Create a backend fused depthwise convolution batch normalization layer function.
std::unique_ptr< IFunction > create_convolution_layer(ConvolutionLayerNode &node, GraphContext &ctx)
Create a backend convolution layer function.
#define ARM_COMPUTE_UNUSED(...)
To avoid unused variables warnings.
std::unique_ptr< IFunction > create_roi_align_layer(ROIAlignLayerNode &node)
Create a backend ROI align layer function.
bool is_enabled() const
Enabled parameter accessor.
Tensor * output(size_t idx) const
Returns the tensor of a given output of the node.
#define ARM_COMPUTE_ERROR_ON_MSG(cond, msg)
const unsigned int num_groups
Pooling Layer Information struct.
void validate_node(const INode &node, size_t num_expected_inputs, size_t num_expected_outputs)
std::unique_ptr< IFunction > create_reduction_operation_layer(ReductionLayerNode &node, GraphContext &ctx)
Create a backend reduction operation layer function.
std::unique_ptr< IFunction > create_priorbox_layer(PriorBoxLayerNode &node)
Create a backend priorbox layer function.
NodeID id() const
Returns node's ID.
std::unique_ptr< IFunction > create_detection_output_layer(DetectionOutputLayerNode &node)
Create a backend detection output layer function.
Channel Shuffle Layer node.
Padding and stride information class.
Concatenation Layer node.
std::unique_ptr< IFunction > create_detection_post_process_layer(DetectionPostProcessLayerNode &node)
Create a backend detection post process layer function.
Tensor handle interface object.
std::unique_ptr< IFunction > create_deconvolution_layer(DeconvolutionLayerNode &node, GraphContext &ctx)
Create a backend deconvolution layer function.
Reduction Operation node.
std::unique_ptr< IFunction > create_unary_eltwise_layer(UnaryEltwiseLayerNode &node)
Create a backend unary element-wise operation layer function.
bool is_data_type_quantized_asymmetric(DataType dt)
Check if a given data type is of asymmetric quantized type.
bool is_in_place_operation(void *input, void *output)
Checks if an operation is in place.
Strides of an item in bytes.
std::unique_ptr< IFunction > create_print_layer(PrintLayerNode &node)
Create a backend print layer function.
Detection Output layer info.
DetectionPostProcess Layer node.
ScaleKernelInfo info(interpolation_policy, default_border_mode, PixelValue(), sampling_policy, false)
std::unique_ptr< IFunction > create_fully_connected_layer(FullyConnectedLayerNode &node, GraphContext &ctx)
Create a backend fully connected layer function.
std::unique_ptr< IFunction > create_activation_layer(ActivationLayerNode &node)
Creates a backend activation layer function.
DetectionOutput Layer node.
int axis() const
Stack axis parameter accessor.
int32_t shrink_axis_mask() const
std::unique_ptr< IFunction > create_strided_slice_layer(StridedSliceLayerNode &node)
Create a backend strided slice layer function.
unsigned int pooled_height() const
Get the pooled height of the layer.
ROI Pooling Layer Information class.
Class for specifying the size of an image or rectangle.
std::unique_ptr< IFunction > create_quantization_layer(QuantizationLayerNode &node)
Create a backend quantization layer function.
Unary Eltwise Layer node.
std::string to_string(const ICLTensor &arg)
std::shared_ptr< IMemoryManager > get_memory_manager(GraphContext &ctx, Target target)
Returns the memory manager for a given target.
std::unique_ptr< IFunction > create_normalize_planar_yuv_layer(NormalizePlanarYUVLayerNode &node)
Create a backend normalize planar YUV layer function.
#define ARM_COMPUTE_LOG_GRAPH_VERBOSE(x)
ConvolutionMethod
Supported Convolution layer methods.
std::unique_ptr< IFunction > create_bounding_box_transform_layer(BoundingBoxTransformLayerNode &node)
Create a backend bounding box transform layer function.
Detection Output layer info.
size_t num_inputs() const
Returns number of inputs of the node.
Generate Proposals Layer node.
TargetInfo::TensorType * get_backing_tensor(arm_compute::graph::Tensor *tensor)
Returns backing tensor of a given tensor.
Deconvolution Layer node.
Fully Connected Layer node.
Depthwise Convolution Layer node.
Winograd based convolution.
ActivationFunction activation() const
Get the type of activation function.
virtual NodeType type() const =0
Returns node's type.
Arithmetic multiplication.
std::shared_ptr< IWeightsManager > get_weights_manager(GraphContext &ctx, Target target)
Returns the weights manager for a given target.
Tensor * input(size_t idx) const
Returns the tensor of a given input of the node.
NodeType type() const override
Returns node's type.
DataLayout layout
Data layout.
std::unique_ptr< IFunction > create_pooling_layer(PoolingLayerNode &node)
Create a backend pooling layer function.
DataLayout
[DataLayout enum definition]
std::unique_ptr< arm_compute::IFunction > create_concatenate_layer(ConcatenateLayerNode &node)
Create a backend layer concatenate function.
int32_t begin_mask() const
ConvertPolicy
Policy to handle overflow.
std::unique_ptr< IFunction > create_reshape_layer(ReshapeLayerNode &node)
Create a backend reshape layer function.
size_t get_dimension_idx(DataLayout data_layout, const DataLayoutDimension data_layout_dimension)
Get index of a tensor's given dimension depending on its layout.
std::unique_ptr< IFunction > create_reorg_layer(ReorgLayerNode &node)
Create a backend reorg layer function.
std::unique_ptr< IFunction > create_channel_shuffle_layer(ChannelShuffleLayerNode &node)
Create a backend channel shuffle layer function.
UnaryEltwiseOperation
Supported Unary Element-wise operations.