24 #ifndef ARM_COMPUTE_GRAPH_BACKENDS_DETAIL_FUNCTION_HELPERS_H 25 #define ARM_COMPUTE_GRAPH_BACKENDS_DETAIL_FUNCTION_HELPERS_H 61 template <
typename TargetInfo>
71 backing_tensor = (tensor_handle !=
nullptr) ? arm_compute::utils::cast::polymorphic_cast<typename TargetInfo::TensorType *>(&tensor_handle->tensor()) :
nullptr;
74 return backing_tensor;
77 template <
typename TargetInfo>
81 <<
" Target: " << TargetInfo::TargetType
82 <<
" ID: " << node.
id()
101 template <
typename ActivationLayerFunction,
typename TargetInfo>
104 validate_node<TargetInfo>(node, 1 , 1 );
112 auto func = std::make_unique<ActivationLayerFunction>();
113 func->configure(input, output, act_info);
117 <<
" Type: " << node.type()
118 <<
" Target: " << TargetInfo::TargetType
119 <<
" Data Type: " << input->info()->data_type()
120 <<
" Shape: " << input->info()->tensor_shape()
121 <<
" Activation function: " << act_info.activation()
122 <<
" a: " << act_info.a()
123 <<
" b: " << act_info.b()
127 return std::move(func);
139 template <
typename ArgMinMaxLayerFunction,
typename TargetInfo>
142 validate_node<TargetInfo>(node, 1 , 1 );
148 unsigned int axis = node.axis();
151 auto func = std::make_unique<ArgMinMaxLayerFunction>();
152 func->configure(input, axis, output, op);
156 <<
" Type: " << node.type()
157 <<
" Target: " << TargetInfo::TargetType
158 <<
" Data Type: " << input->info()->data_type()
159 <<
" Shape: " << input->info()->tensor_shape()
160 <<
" Reduction Operation: " << op
164 return std::move(func);
176 template <
typename BatchNormalizationLayerFunction,
typename TargetInfo>
179 validate_node<TargetInfo>(node, 5 , 1 );
189 const float epsilon = node.epsilon();
193 auto func = std::make_unique<BatchNormalizationLayerFunction>();
194 func->configure(input, output, mean, var, beta, gamma, epsilon, fused_act);
199 <<
" Type: " << node.type()
200 <<
" Target: " << TargetInfo::TargetType
201 <<
" Data Type: " << input->info()->data_type()
202 <<
" Shape: " << input->info()->tensor_shape()
203 <<
" Epsilon: " << epsilon <<
" " 208 return std::move(func);
221 template <
typename FusedLayerTypes,
typename TargetInfo>
224 validate_node<TargetInfo>(node, 7 , 1 );
238 const unsigned int num_groups = node.num_groups();
241 const float epsilon = node.epsilon();
245 std::unique_ptr<IFunction> func;
246 std::string func_name;
251 std::tie(func, func_name) = create_named_memory_managed_function<FType>(
252 std::string(
"FusedConvolutionBatchNormalizationLayer"), mm,
input, weights, biases, output, mean, var, beta, gamma,
epsilon,
conv_info,
num_groups, fast_math, fused_act);
257 <<
" Type: " << node.type()
258 <<
" Target: " << TargetInfo::TargetType
259 <<
" Data Type: " << input->info()->data_type()
260 <<
" Input shape: " << input->info()->tensor_shape()
261 <<
" Weights shape: " << weights->info()->tensor_shape()
262 <<
" Output shape: " << output->info()->tensor_shape()
265 return std::move(func);
278 template <
typename FusedLayerTypes,
typename TargetInfo>
281 validate_node<TargetInfo>(node, 7 , 1 );
295 const unsigned int depth_multiplier = node.depth_multiplier();
297 const float epsilon = node.epsilon();
301 std::unique_ptr<IFunction> func;
302 std::string func_name;
307 std::tie(func, func_name) = create_named_memory_managed_function<FType>(
308 std::string(
"FusedDepthwiseConvolutionBatchNormalizationLayer"), mm,
input, weights, biases, output, mean, var, beta, gamma,
epsilon,
conv_info, depth_multiplier, fused_act);
313 <<
" Type: " << node.type()
314 <<
" Target: " << TargetInfo::TargetType
315 <<
" Data Type: " << input->info()->data_type()
316 <<
" Input shape: " << input->info()->tensor_shape()
317 <<
" Weights shape: " << weights->info()->tensor_shape()
318 <<
" Output shape: " << output->info()->tensor_shape()
321 return std::move(func);
333 template <
typename BoundingBoxTransformLayerFunction,
typename TargetInfo>
336 validate_node<TargetInfo>(node, 2 , 1 );
345 auto func = std::make_unique<BoundingBoxTransformLayerFunction>();
346 func->configure(input, output, deltas, bbox_info);
351 <<
" Type: " << node.type()
352 <<
" Target: " << TargetInfo::TargetType
353 <<
" Data Type: " << input->info()->data_type()
354 <<
" Shape: " << input->info()->tensor_shape()
355 <<
" BoundingBox Info img W: " << bbox_info.img_width() <<
" " 356 <<
" BoundingBox Info img H: " << bbox_info.img_height() <<
" " 359 return std::move(func);
371 template <
typename ChannelShuffleLayerFunction,
typename TargetInfo>
374 validate_node<TargetInfo>(node, 1 , 1 );
379 const unsigned int num_groups = node.num_groups();
382 auto func = std::make_unique<ChannelShuffleLayerFunction>();
383 func->configure(input, output, num_groups);
387 <<
" Type: " << node.type()
388 <<
" Target: " << TargetInfo::TargetType
389 <<
" Data Type: " << input->info()->data_type()
390 <<
" Shape: " << input->info()->tensor_shape()
391 <<
" Num groups: " << num_groups
394 return std::move(func);
406 template <
typename ConcatenateLayerFunction,
typename TargetInfo>
419 std::vector<typename TargetInfo::SrcTensorType *> inputs;
420 for(
unsigned int i = 0; i < node.
num_inputs(); ++i)
422 inputs.push_back(get_backing_tensor<TargetInfo>(node.
input(i)));
429 auto func = std::make_unique<ConcatenateLayerFunction>();
430 func->configure(inputs, output, concat_axis);
434 std::ostringstream qss;
437 qss <<
" Output QuantInfo: " << output->info()->quantization_info();
441 <<
" Type: " << node.
type()
442 <<
" Target: " << TargetInfo::TargetType
443 <<
" Data Type: " << output->info()->data_type()
444 <<
" Shape: " << output->info()->tensor_shape()
445 <<
" Num Inputs: " << inputs.size()
446 <<
" Axis: " << concat_axis
450 return std::move(func);
463 template <
typename ConvolutionLayerFunctions,
typename TargetInfo>
466 validate_node<TargetInfo>(node, 3 , 1 );
482 const unsigned int num_groups = node.num_groups();
489 std::unique_ptr<IFunction> func;
490 std::string func_name;
495 std::tie(func, func_name) = create_named_memory_managed_function<typename ConvolutionLayerFunctions::WinogradConvolutionLayer>(
496 std::string(
"WinogradConvolutionLayer"), mm,
502 std::tie(func, func_name) = create_named_function<typename ConvolutionLayerFunctions::DirectConvolutionLayer>(
503 std::string(
"DirectConvolutionLayer"),
508 std::tie(func, func_name) = create_named_memory_managed_function<typename ConvolutionLayerFunctions::GEMMConvolutionLayer>(
509 std::string(
"GEMMConvolutionLayer"), mm,
515 std::tie(func, func_name) = create_named_memory_managed_function<typename ConvolutionLayerFunctions::GenericConvolutionLayer>(
516 std::string(
"GenericConvolutionLayer"), mm,
522 std::ostringstream qss;
525 qss <<
" Input QuantInfo: " << input->info()->quantization_info()
526 <<
" Weights QuantInfo: " << weights->info()->quantization_info()
527 <<
" Output QuantInfo: " << output->info()->quantization_info();
531 <<
" Type: " << func_name
532 <<
" Target: " << TargetInfo::TargetType
533 <<
" Data Type: " << input->info()->data_type()
534 <<
" Groups: " << num_groups
535 <<
" Input shape: " << input->info()->tensor_shape()
536 <<
" Weights shape: " << weights->info()->tensor_shape()
537 <<
" Output shape: " << output->info()->tensor_shape()
541 return std::move(func);
554 template <
typename ConvolutionLayerFunctions,
typename TargetInfo>
557 validate_node<TargetInfo>(node, 4 , 1 );
573 const unsigned int num_groups = node.num_groups();
578 auto &post_op_info_list = node.post_op_info_list();
579 for(
const auto &post_op_info : post_op_info_list)
581 switch(post_op_info->type())
585 const auto act_info = utils::cast::polymorphic_downcast<const ConvPostOpInfoActivation *>(post_op_info.get());
586 post_ops.template push_back_op<experimental::PostOpAct<typename TargetInfo::TensorType *>>(act_info->_act);
589 case PostOpType::Eltwise_Add:
592 const auto eltwise_info = utils::cast::polymorphic_downcast<const ConvPostOpInfoEltwiseAdd *>(post_op_info.get());
593 post_ops.template push_back_op<experimental::PostOpEltwiseAdd<typename TargetInfo::TensorType *>>(add_input, eltwise_info->_prev_op_dst_pos, eltwise_info->_policy);
605 std::unique_ptr<IFunction> func;
606 std::string func_name;
609 std::tie(func, func_name) = create_named_memory_managed_function<typename ConvolutionLayerFunctions::GEMMConvolutionLayer>(
610 std::string(
"GEMMConvolutionLayer"), mm,
615 std::ostringstream qss;
618 qss <<
" Input QuantInfo: " << input->info()->quantization_info()
619 <<
" Weights QuantInfo: " << weights->info()->quantization_info()
620 <<
" Output QuantInfo: " << output->info()->quantization_info();
624 <<
" Type: " << func_name
625 <<
" Target: " << TargetInfo::TargetType
626 <<
" Data Type: " << input->info()->data_type()
627 <<
" Groups: " << num_groups
628 <<
" Input shape: " << input->info()->tensor_shape()
629 <<
" Weights shape: " << weights->info()->tensor_shape()
630 <<
" Output shape: " << output->info()->tensor_shape()
632 << (fused_act.enabled() ?
" " +
to_string(fused_act.activation()) :
"")
633 <<
" Post ops" << post_ops
635 return std::move(func);
648 template <
typename FusedLayerTypes,
typename TargetInfo>
651 validate_node<TargetInfo>(node, 8 , 1 );
665 const unsigned int num_groups = node.num_groups();
667 const float epsilon = node.epsilon();
671 auto &post_op_info_list = node.post_op_info_list();
672 for(
const auto &post_op_info : post_op_info_list)
674 switch(post_op_info->type())
678 const auto act_info = utils::cast::polymorphic_downcast<const ConvPostOpInfoActivation *>(post_op_info.get());
679 post_ops.template push_back_op<experimental::PostOpAct<typename TargetInfo::TensorType *>>(act_info->_act);
682 case PostOpType::Eltwise_Add:
685 const auto eltwise_info = utils::cast::polymorphic_downcast<const ConvPostOpInfoEltwiseAdd *>(post_op_info.get());
686 post_ops.template push_back_op<experimental::PostOpEltwiseAdd<typename TargetInfo::TensorType *>>(add_input, eltwise_info->_prev_op_dst_pos, eltwise_info->_policy);
698 std::unique_ptr<IFunction> func;
699 std::string func_name;
704 std::tie(func, func_name) = create_named_memory_managed_function<FType>(
705 std::string(
"FusedConvolutionBatchNormalizationLayerWithPostOpsLayer"), mm,
input, weights, biases, output, mean, var, beta, gamma,
epsilon,
conv_info,
num_groups, fast_math,
post_ops);
710 <<
" Type: " << node.type()
711 <<
" Target: " << TargetInfo::TargetType
712 <<
" Data Type: " << input->info()->data_type()
713 <<
" Input shape: " << input->info()->tensor_shape()
714 <<
" Weights shape: " << weights->info()->tensor_shape()
715 <<
" Output shape: " << output->info()->tensor_shape()
716 <<
" Post Ops:" << post_ops
718 return std::move(func);
731 template <
typename DeconvolutionLayerFunction,
typename TargetInfo>
734 validate_node<TargetInfo>(node, 3 , 1 );
746 std::unique_ptr<IFunction> func;
748 std::tie(func, std::ignore) = create_named_memory_managed_function<DeconvolutionLayerFunction>(
750 input, weights, biases, output, deconv_info);
755 <<
" Type: " << node.type()
756 <<
" Target: " << TargetInfo::TargetType
757 <<
" Data Type: " << input->info()->data_type()
758 <<
" Input shape: " << input->info()->tensor_shape()
759 <<
" Weights shape: " << weights->info()->tensor_shape()
760 <<
" Output shape: " << output->info()->tensor_shape()
774 template <
typename DepthwiseConvolutionLayer,
typename TargetInfo>
777 validate_node<TargetInfo>(node, 3 , 1 );
793 const unsigned int depth_multiplier = node.depth_multiplier();
797 std::unique_ptr<IFunction> func;
798 std::string func_name;
800 std::tie(func, func_name) = create_named_function<DepthwiseConvolutionLayer>(
801 std::string(
"DepthwiseConvolutionLayer"),
802 input, weights, biases, output,
conv_info, depth_multiplier, fused_act);
805 std::ostringstream qss;
808 qss <<
" Input QuantInfo: " << input->info()->quantization_info()
809 <<
" Weights QuantInfo: " << weights->info()->quantization_info()
810 <<
" Output QuantInfo: " << output->info()->quantization_info();
814 <<
" Type: " << func_name
815 <<
" Target: " << TargetInfo::TargetType
816 <<
" Data Type: " << input->info()->data_type()
817 <<
" Input shape: " << input->info()->tensor_shape()
818 <<
" Weights shape: " << weights->info()->tensor_shape()
819 <<
" Output shape: " << output->info()->tensor_shape()
820 <<
" Depth multiplier: " << depth_multiplier
824 return std::move(func);
836 template <
typename DepthToSpaceLayerFunction,
typename TargetInfo>
839 validate_node<TargetInfo>(node, 1 , 1 );
849 auto func = std::make_unique<DepthToSpaceLayerFunction>();
850 func->configure(input, output, node.block_shape());
855 <<
" Type: " << node.type()
856 <<
" Target: " << TargetInfo::TargetType
857 <<
" Data Type: " << input->info()->data_type()
858 <<
" Input shape: " << input->info()->tensor_shape()
859 <<
" Block Size: " << node.block_shape()
860 <<
" Output shape: " << output->info()->tensor_shape()
863 return std::move(func);
875 template <
typename DequantizationLayerFunction,
typename TargetInfo>
878 validate_node<TargetInfo>(node, 1 , 1 );
888 auto func = std::make_unique<DequantizationLayerFunction>();
889 func->configure(input, output);
894 <<
" Type: " << node.type()
895 <<
" Target: " << TargetInfo::TargetType
896 <<
" Data Type: " << input->info()->data_type()
897 <<
" Input shape: " << input->info()->tensor_shape()
898 <<
" Input quantization info: " << output->info()->quantization_info()
899 <<
" Output shape: " << output->info()->tensor_shape()
902 return std::move(func);
913 template <
typename DetectionOutputLayerFunction,
typename TargetInfo>
916 validate_node<TargetInfo>(node, 3 , 1 );
931 auto func = std::make_unique<DetectionOutputLayerFunction>();
932 func->configure(input0, input1, input2, output, detect_info);
937 <<
" Type: " << node.type()
938 <<
" Target: " << TargetInfo::TargetType
939 <<
" Data Type: " << input0->info()->data_type()
940 <<
" Input0 shape: " << input0->info()->tensor_shape()
941 <<
" Input1 shape: " << input1->info()->tensor_shape()
942 <<
" Input2 shape: " << input2->info()->tensor_shape()
943 <<
" Output shape: " << output->info()->tensor_shape()
944 <<
" DetectionOutputLayer info: " << detect_info
947 return std::move(func);
959 template <
typename DetectionPostProcessLayerFunction,
typename TargetInfo>
962 validate_node<TargetInfo>(node, 3 , 4 );
983 auto func = std::make_unique<DetectionPostProcessLayerFunction>();
984 func->configure(input0, input1, input2, output0, output1, output2, output3, detect_info);
989 <<
" Type: " << node.type()
990 <<
" Target: " << TargetInfo::TargetType
991 <<
" Data Type: " << input0->info()->data_type()
992 <<
" Input0 shape: " << input0->info()->tensor_shape()
993 <<
" Input1 shape: " << input1->info()->tensor_shape()
994 <<
" Input2 shape: " << input2->info()->tensor_shape()
995 <<
" Output0 shape: " << output0->info()->tensor_shape()
996 <<
" Output1 shape: " << output1->info()->tensor_shape()
997 <<
" Output2 shape: " << output2->info()->tensor_shape()
998 <<
" Output3 shape: " << output3->info()->tensor_shape()
999 <<
" DetectionPostProcessLayer info: " << detect_info
1002 return std::move(func);
1014 template <
typename EltwiseFunctions,
typename TargetInfo>
1017 validate_node<TargetInfo>(node, 2 , 1 );
1030 std::unique_ptr<IFunction> func =
nullptr;
1031 std::string func_name;
1034 std::tie(func, func_name) = create_named_function<typename EltwiseFunctions::Addition>(
1035 std::string(
"ArithmeticAddition"),
1036 input1, input2, output, convert_policy, act_info);
1040 std::tie(func, func_name) = create_named_function<typename EltwiseFunctions::Subtraction>(
1041 std::string(
"ArithmeticSubtraction"),
1042 input1, input2, output, convert_policy, act_info);
1046 std::tie(func, func_name) = create_named_function<typename EltwiseFunctions::Multiplication>(
1047 std::string(
"PixelWiseMultiplication"),
1048 input1, input2, output, 1.f, convert_policy, node.rounding_policy(), act_info);
1052 std::tie(func, func_name) = create_named_function<typename EltwiseFunctions::Maximum>(
1053 std::string(
"ElementwiseMaximum"),
1054 input1, input2, output, act_info);
1058 std::tie(func, func_name) = create_named_function<typename EltwiseFunctions::Division>(
1059 std::string(
"ArithmeticDivision"),
1060 input1, input2, output, act_info);
1070 <<
" Type: " << node.type()
1071 <<
" Target: " << TargetInfo::TargetType
1072 <<
" Operation: " << func_name
1073 <<
" Data Type: " << input1->info()->data_type()
1074 <<
" Shape: " << input1->info()->tensor_shape()
1077 return std::move(func);
1089 template <
typename UnaryEltwiseFunctions,
typename TargetInfo>
1092 validate_node<TargetInfo>(node, 1 , 1 );
1102 std::unique_ptr<IFunction> func =
nullptr;
1103 std::string func_name;
1106 std::tie(func, func_name) = create_named_function<typename UnaryEltwiseFunctions::Exp>(
1118 <<
" Type: " << node.type()
1119 <<
" Target: " << TargetInfo::TargetType
1120 <<
" Operation: " << func_name
1121 <<
" Data Type: " << input->info()->data_type()
1122 <<
" Shape: " << input->info()->tensor_shape()
1125 return std::move(func);
1137 template <
typename FlattenLayerFunction,
typename TargetInfo>
1140 validate_node<TargetInfo>(node, 1 , 1 );
1150 auto func = std::make_unique<FlattenLayerFunction>();
1151 func->configure(input, output);
1156 <<
" Type: " << node.type()
1157 <<
" Target: " << TargetInfo::TargetType
1158 <<
" Data Type: " << input->info()->data_type()
1159 <<
" Input shape: " << input->info()->tensor_shape()
1160 <<
" Output shape: " << output->info()->tensor_shape()
1163 return std::move(func);
1176 template <
typename FullyConnectedLayerFunction,
typename TargetInfo>
1179 validate_node<TargetInfo>(node, 3 , 1 );
1196 auto func = std::make_unique<FullyConnectedLayerFunction>(mm, wm.get());
1197 func->configure(input, weights, biases, output, fc_info);
1202 std::ostringstream qss;
1205 qss <<
" Input QuantInfo: " << input->info()->quantization_info()
1206 <<
" Weights QuantInfo: " << weights->info()->quantization_info()
1207 <<
" Output QuantInfo: " << output->info()->quantization_info();
1211 <<
" Type: " << node.type()
1212 <<
" Target: " << TargetInfo::TargetType
1213 <<
" Data Type: " << input->info()->data_type()
1215 <<
" Input shape: " << input->info()->tensor_shape()
1216 <<
" Weights shape: " << weights->info()->tensor_shape()
1217 <<
" Output shape: " << output->info()->tensor_shape()
1220 return std::move(func);
1233 template <
typename GenerateProposalsLayerFunction,
typename TargetInfo>
1236 validate_node<TargetInfo>(node, 3 , 3 );
1254 auto func = std::make_unique<GenerateProposalsLayerFunction>(
get_memory_manager(ctx, TargetInfo::TargetType));
1255 func->configure(scores, deltas, anchors, proposals, scores_out, num_valid_proposals, info);
1259 <<
" Target " << TargetInfo::TargetType
1260 <<
" Data Type: " << scores->info()->data_type()
1261 <<
" Scores shape: " << scores->info()->tensor_shape()
1262 <<
" Deltas shape: " << deltas->info()->tensor_shape()
1263 <<
" Anchors shape: " << anchors->info()->tensor_shape()
1264 <<
" Proposals shape: " << proposals->info()->tensor_shape()
1265 <<
" Num valid proposals shape: " << num_valid_proposals->info()->tensor_shape()
1266 <<
" Scores Out shape: " << scores_out->info()->tensor_shape()
1269 return std::move(func);
1282 template <
typename L2NormalizeLayerFunction,
typename TargetInfo>
1285 validate_node<TargetInfo>(node, 1 , 1 );
1290 int axis = node.axis();
1291 float epsilon = node.epsilon();
1298 auto func = std::make_unique<L2NormalizeLayerFunction>(mm);
1299 func->configure(input, output, axis, epsilon);
1304 <<
" Type: " << node.type()
1305 <<
" Target: " << TargetInfo::TargetType
1306 <<
" Data Type: " << input->info()->data_type()
1307 <<
" Input shape: " << input->info()->tensor_shape()
1308 <<
" Output shape: " << output->info()->tensor_shape()
1309 <<
" Axis: " << axis
1310 <<
" Epsilon: " << epsilon
1313 return std::move(func);
1326 template <
typename NormalizationLayerFunction,
typename TargetInfo>
1331 validate_node<TargetInfo>(node, 1 , 1 );
1341 auto func = std::make_unique<NormalizationLayerFunction>();
1342 func->configure(input, output, norm_info);
1347 <<
" Type: " << node.type()
1348 <<
" Target: " << TargetInfo::TargetType
1349 <<
" Data Type: " << input->info()->data_type()
1350 <<
" Input shape: " << input->info()->tensor_shape()
1351 <<
" Output shape: " << output->info()->tensor_shape()
1352 <<
" Normalization info: " << norm_info.type()
1355 return std::move(func);
1367 template <
typename NormalizePlanarYUVLayerFunction,
typename TargetInfo>
1370 validate_node<TargetInfo>(node, 3 , 1 );
1383 auto func = std::make_unique<NormalizePlanarYUVLayerFunction>();
1384 func->configure(input, output, mean, std);
1389 <<
" Type: " << node.type()
1390 <<
" Target: " << TargetInfo::TargetType
1391 <<
" Data Type: " << input->info()->data_type()
1392 <<
" Shape: " << input->info()->tensor_shape()
1395 return std::move(func);
1407 template <
typename PadLayerFunction,
typename TargetInfo>
1410 validate_node<TargetInfo>(node, 1 , 1 );
1416 const PixelValue pad_value = node.pad_value();
1421 auto func = std::make_unique<PadLayerFunction>();
1422 func->configure(input, output, padding, pad_value);
1427 <<
" Type: " << node.type()
1428 <<
" Target: " << TargetInfo::TargetType
1429 <<
" Data Type: " << input->info()->data_type()
1430 <<
" Input shape: " << input->info()->tensor_shape()
1431 <<
" Output shape: " << output->info()->tensor_shape()
1434 return std::move(func);
1446 template <
typename PermuteLayerFunction,
typename TargetInfo>
1449 validate_node<TargetInfo>(node, 1 , 1 );
1459 auto func = std::make_unique<PermuteLayerFunction>();
1460 func->configure(input, output, perm);
1465 <<
" Type: " << node.type()
1466 <<
" Target: " << TargetInfo::TargetType
1467 <<
" Data Type: " << input->info()->data_type()
1468 <<
" Input shape: " << input->info()->tensor_shape()
1469 <<
" Output shape: " << output->info()->tensor_shape()
1470 <<
" Permutation vector: " << perm
1473 return std::move(func);
1485 template <
typename PoolingLayerFunction,
typename TargetInfo>
1488 validate_node<TargetInfo>(node, 1 , 1 );
1498 auto func = std::make_unique<PoolingLayerFunction>();
1499 func->configure(input, output, pool_info);
1504 <<
" Type: " << node.type()
1505 <<
" Target: " << TargetInfo::TargetType
1506 <<
" Data Type: " << input->info()->data_type()
1507 <<
" Input shape: " << input->info()->tensor_shape()
1508 <<
" Output shape: " << output->info()->tensor_shape()
1509 <<
" Pooling info: " << pool_info.pool_type
1512 return std::move(func);
1524 template <
typename PReluFunction,
typename TargetInfo>
1527 validate_node<TargetInfo>(node, 2 , 1 );
1537 auto func = std::make_unique<PReluFunction>();
1538 func->configure(input, alpha, output);
1543 <<
" Type: " << node.type()
1544 <<
" Target: " << TargetInfo::TargetType
1545 <<
" Data Type: " << input->info()->data_type()
1546 <<
" Input shape: " << input->info()->tensor_shape()
1547 <<
" Output shape: " << output->info()->tensor_shape()
1550 return std::move(func);
1561 template <
typename TargetInfo>
1564 validate_node<TargetInfo>(node, 1 , 1 );
1573 <<
" Type: " << node.type()
1574 <<
" Target: " << TargetInfo::TargetType
1575 <<
" Data Type: " << input->info()->data_type()
1576 <<
" Input shape: " << input->info()->tensor_shape()
1591 template <
typename PriorBoxLayerFunction,
typename TargetInfo>
1594 validate_node<TargetInfo>(node, 2 , 1 );
1606 auto func = std::make_unique<PriorBoxLayerFunction>();
1607 func->configure(input0, input1, output, prior_info);
1612 <<
" Type: " << node.type()
1613 <<
" Target: " << TargetInfo::TargetType
1614 <<
" Data Type: " << input0->info()->data_type()
1615 <<
" Input0 shape: " << input0->info()->tensor_shape()
1616 <<
" Input1 shape: " << input1->info()->tensor_shape()
1617 <<
" Output shape: " << output->info()->tensor_shape()
1618 <<
" PriorBoxLayer info: " << prior_info
1621 return std::move(func);
1633 template <
typename QuantizationLayerFunction,
typename TargetInfo>
1636 validate_node<TargetInfo>(node, 1 , 1 );
1645 auto func = std::make_unique<QuantizationLayerFunction>();
1646 func->configure(input, output);
1651 <<
" Type: " << node.type()
1652 <<
" Target: " << TargetInfo::TargetType
1653 <<
" Data Type: " << input->info()->data_type()
1654 <<
" Input shape: " << input->info()->tensor_shape()
1655 <<
" Output shape: " << output->info()->tensor_shape()
1658 return std::move(func);
1671 template <
typename ReductionOperationFunction,
typename TargetInfo>
1674 validate_node<TargetInfo>(node, 1 , 1 );
1680 int axis = node.axis();
1681 bool keep_dims = node.keep_dims();
1686 auto func = std::make_unique<ReductionOperationFunction>(
get_memory_manager(ctx, TargetInfo::TargetType));
1687 func->configure(input, output, axis, op, keep_dims);
1692 <<
" Type: " << node.type()
1693 <<
" Target: " << TargetInfo::TargetType
1694 <<
" Data Type: " << input->info()->data_type()
1695 <<
" Input shape: " << input->info()->tensor_shape()
1696 <<
" Output shape: " << output->info()->tensor_shape()
1697 <<
" Operation: " << op
1698 <<
" Axis: " << axis
1699 <<
" Keep dimensions:" << keep_dims
1702 return std::move(func);
1714 template <
typename ReorgLayerFunction,
typename TargetInfo>
1717 validate_node<TargetInfo>(node, 1 , 1 );
1726 auto func = std::make_unique<ReorgLayerFunction>();
1727 func->configure(input, output, node.stride());
1732 <<
" Type: " << node.type()
1733 <<
" Target: " << TargetInfo::TargetType
1734 <<
" Data Type: " << input->info()->data_type()
1735 <<
" Input shape: " << input->info()->tensor_shape()
1736 <<
" Output shape: " << output->info()->tensor_shape()
1739 return std::move(func);
1751 template <
typename ReshapeLayerFunction,
typename TargetInfo>
1754 validate_node<TargetInfo>(node, 1 , 1 );
1763 auto func = std::make_unique<ReshapeLayerFunction>();
1764 func->configure(input, output);
1769 <<
" Type: " << node.type()
1770 <<
" Target: " << TargetInfo::TargetType
1771 <<
" Data Type: " << input->info()->data_type()
1772 <<
" Input shape: " << input->info()->tensor_shape()
1773 <<
" Output shape: " << output->info()->tensor_shape()
1776 return std::move(func);
1788 template <
typename ResizeLayerFunction,
typename TargetInfo>
1791 validate_node<TargetInfo>(node, 1 , 1 );
1801 auto func = std::make_unique<ResizeLayerFunction>();
1807 <<
" Type: " << node.type()
1808 <<
" Target: " << TargetInfo::TargetType
1809 <<
" Data Type: " << input->info()->data_type()
1810 <<
" Input shape: " << input->info()->tensor_shape()
1811 <<
" Output shape: " << output->info()->tensor_shape()
1812 <<
" Interpolation: " << policy
1815 return std::move(func);
1827 template <
typename ROIAlignLayerFunction,
typename TargetInfo>
1830 validate_node<TargetInfo>(node, 2 , 1 );
1843 auto func = std::make_unique<ROIAlignLayerFunction>();
1845 func->configure(input, rois, output, pool_info);
1850 <<
" Type: " << node.type()
1851 <<
" Target: " << TargetInfo::TargetType
1852 <<
" Data Type: " << input->info()->data_type()
1853 <<
" Input shape: " << input->info()->tensor_shape()
1854 <<
" Output shape: " << output->info()->tensor_shape()
1855 <<
" ROIs shape: " << rois->info()->tensor_shape()
1860 return std::move(func);
1872 template <
typename SliceLayerFunction,
typename TargetInfo>
1875 validate_node<TargetInfo>(node, 1 , 1 );
1884 auto func = std::make_unique<SliceLayerFunction>();
1885 func->configure(input, output, node.starts(), node.ends());
1890 <<
" Type: " << node.type()
1891 <<
" Target: " << TargetInfo::TargetType
1892 <<
" Data Type: " << input->info()->data_type()
1893 <<
" Input shape: " << input->info()->tensor_shape()
1894 <<
" Output shape: " << output->info()->tensor_shape()
1897 return std::move(func);
1910 template <
typename SoftmaxLayerFunction,
typename TargetInfo>
1913 validate_node<TargetInfo>(node, 1 , 1 );
1918 const float beta = node.beta();
1923 auto func = std::make_unique<SoftmaxLayerFunction>(
get_memory_manager(ctx, TargetInfo::TargetType));
1924 func->configure(input, output, beta);
1929 <<
" Type: " << node.type()
1930 <<
" Target: " << TargetInfo::TargetType
1931 <<
" Data Type: " << input->info()->data_type()
1932 <<
" Input shape: " << input->info()->tensor_shape()
1933 <<
" Output shape: " << output->info()->tensor_shape()
1936 return std::move(func);
1948 template <
typename StackLayerFunction,
typename TargetInfo>
1955 std::vector<typename TargetInfo::TensorType *> inputs;
1956 for(
unsigned int i = 0; i < node.
num_inputs(); ++i)
1958 inputs.push_back(get_backing_tensor<TargetInfo>(node.
input(i)));
1961 const int axis = node.
axis();
1964 auto func = std::make_unique<StackLayerFunction>();
1965 func->configure(inputs, axis, output);
1970 <<
" Type: " << node.
type()
1971 <<
" Target: " << TargetInfo::TargetType
1972 <<
" Data Type: " << output->info()->data_type()
1973 <<
" Inputs shape: " << inputs[0]->info()->tensor_shape()
1974 <<
" Output shape: " << output->info()->tensor_shape()
1975 <<
" Num Inputs: " << inputs.size()
1976 <<
" Axis: " << axis
1979 return std::move(func);
1991 template <
typename Str
idedSliceLayerFunction,
typename TargetInfo>
1994 validate_node<TargetInfo>(node, 1 , 1 );
2008 auto func = std::make_unique<StridedSliceLayerFunction>();
2014 <<
" Type: " << node.type()
2015 <<
" Target: " << TargetInfo::TargetType
2016 <<
" Data Type: " << input->info()->data_type()
2017 <<
" Input shape: " << input->info()->tensor_shape()
2018 <<
" Output shape: " << output->info()->tensor_shape()
2021 return std::move(func);
std::unique_ptr< IFunction > create_normalization_layer(NormalizationLayerNode &node, GraphContext &ctx)
Create a backend normalization layer function.
Wrapper function to first apply {NE, CL}BatchNormalizationLayer on the weights and then run {NE...
std::unique_ptr< IFunction > create_dequantization_layer(DequantizationLayerNode &node)
Create a backend dequantize layer function.
std::string name() const
Returns node's name.
Class describing the value of a pixel for any image format.
InterpolationPolicy
Interpolation method.
std::unique_ptr< IFunction > create_arg_min_max_layer(ArgMinMaxLayerNode &node)
Creates a backend argminmax layer function.
Generate Proposals Information class.
experimental::PostOpList< ITensorInfo * > post_ops
EltwiseOperation
Supported Element-wise operations.
std::unique_ptr< IFunction > create_slice_layer(SliceLayerNode &node)
Create a backend slice layer function.
Batch Normalization Layer node.
Fused Depthwise Convolution Batch Normalization node.
Normalization Layer node.
std::unique_ptr< IFunction > create_eltwise_layer(EltwiseLayerNode &node)
Create a backend element-wise operation layer function.
bool enabled() const
Check if initialised.
std::vector< PaddingInfo > PaddingList
List of padding information.
ReductionOperation
Available reduction operations.
DataLayoutDimension concatenation_axis() const
Concatenation axis parameter accessor.
ITensorHandle * handle()
Backend tensor handle accessor.
std::unique_ptr< IFunction > create_batch_normalization_layer(BatchNormalizationLayerNode &node)
Create a backend batch normalization layer function.
std::unique_ptr< IFunction > create_depth_to_space_layer(DepthToSpaceLayerNode &node)
Create a backend depth to space layer function.
#define ARM_COMPUTE_ERROR(msg)
Print the given message then throw an std::runtime_error.
std::unique_ptr< IFunction > create_fused_convolution_batch_normalization_layer(FusedConvolutionBatchNormalizationNode &node, GraphContext &ctx)
Create a backend batch normalization layer function.
Target assigned_target() const
Returns assigned target for this node.
std::unique_ptr< IFunction > create_prelu_layer(PReluLayerNode &node)
Create a backend PRelu layer function.
std::unique_ptr< IFunction > create_depthwise_convolution_layer(DepthwiseConvolutionLayerNode &node)
Create a backend layer depth-wise convolution function.
size_t num_outputs() const
Returns number of outputs of the node.
std::unique_ptr< IFunction > create_resize_layer(ResizeLayerNode &node)
Create a backend resize layer function.
Wrapper function to first apply {NE, CL}BatchNormalizationLayer on the weights and then run {NE...
Normalization Layer Information class.
Batch Normalization Layer node.
std::unique_ptr< IFunction > create_permute_layer(PermuteLayerNode &node)
Create a backend permute layer function.
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
std::unique_ptr< IFunction > create_generate_proposals_layer(GenerateProposalsLayerNode &node, GraphContext &ctx)
Create a backend generate proposals layer function.
NodeType type() const override
Returns node's type.
std::unique_ptr< IFunction > create_l2_normalize_layer(L2NormalizeLayerNode &node, GraphContext &ctx)
Create a backend l2 normalization layer function.
Fully connected layer info.
Fast math enabled for Convolution layer.
std::unique_ptr< IFunction > create_flatten_layer(FlattenLayerNode &node)
Create a backend flatten layer function.
unsigned int pooled_width() const
Get the pooled width of the layer.
Wrapper function to first apply {NE, CL}BatchNormalizationLayer on the weights and then run {NE, CL}DepthwiseConvolutionLayer with the modified weights.
std::unique_ptr< IFunction > create_pad_layer(PadLayerNode &node)
Create a backend pad layer function.
Activation Layer Information class.
bool enable_fast_math
Enable fast math computation.
#define ARM_COMPUTE_LOG_GRAPH_INFO(x)
std::unique_ptr< arm_compute::IFunction > create_stack_layer(StackLayerNode &node)
Create a backend layer stack function.
Copyright (c) 2017-2022 Arm Limited.
Batch Normalization node.
Samples are taken at pixel center.
Convolution Layer Weights Information class.
std::unique_ptr< IFunction > create_softmax_layer(SoftmaxLayerNode &node, GraphContext &ctx)
Create a backend softmax layer function.
1 channel, 1 S32 per channel
TensorDescriptor & desc()
TensorInfo metadata accessor.
std::unique_ptr< IFunction > create_fused_depthwise_convolution_batch_normalization_layer(FusedDepthwiseConvolutionBatchNormalizationNode &node, GraphContext &ctx)
Create a backend fused depthwise convolution batch normalization layer function.
std::unique_ptr< IFunction > create_convolution_layer(ConvolutionLayerNode &node, GraphContext &ctx)
Create a backend convolution layer function.
#define ARM_COMPUTE_UNUSED(...)
To avoid unused variables warnings.
std::unique_ptr< IFunction > create_roi_align_layer(ROIAlignLayerNode &node)
Create a backend ROI align layer function.
bool is_enabled() const
Enabled parameter accessor.
Tensor * output(size_t idx) const
Returns the tensor of a given output of the node.
#define ARM_COMPUTE_ERROR_ON_MSG(cond, msg)
const unsigned int num_groups
Pooling Layer Information struct.
void validate_node(const INode &node, size_t num_expected_inputs, size_t num_expected_outputs)
std::unique_ptr< IFunction > create_reduction_operation_layer(ReductionLayerNode &node, GraphContext &ctx)
Create a backend reduction operation layer function.
std::unique_ptr< IFunction > create_priorbox_layer(PriorBoxLayerNode &node)
Create a backend priorbox layer function.
NodeID id() const
Returns node's ID.
std::unique_ptr< IFunction > create_detection_output_layer(DetectionOutputLayerNode &node)
Create a backend detection output layer function.
Channel Shuffle Layer node.
Padding and stride information class.
Concatenation Layer node.
std::string to_string(const T &val)
Fallback method: try to use std::to_string:
Batch Normalization node.
std::unique_ptr< IFunction > create_detection_post_process_layer(DetectionPostProcessLayerNode &node)
Create a backend detection post process layer function.
Tensor handle interface object.
std::unique_ptr< IFunction > create_deconvolution_layer(DeconvolutionLayerNode &node, GraphContext &ctx)
Create a backend deconvolution layer function.
Reduction Operation node.
std::unique_ptr< IFunction > create_unary_eltwise_layer(UnaryEltwiseLayerNode &node)
Create a backend unary element-wise operation layer function.
bool is_data_type_quantized_asymmetric(DataType dt)
Check if a given data type is of asymmetric quantized type.
bool is_in_place_operation(void *input, void *output)
Checks if an operation is in place.
Strides of an item in bytes.
std::unique_ptr< IFunction > create_print_layer(PrintLayerNode &node)
Create a backend print layer function.
Detection Output layer info.
DetectionPostProcess Layer node.
ScaleKernelInfo info(interpolation_policy, default_border_mode, PixelValue(), sampling_policy, false)
std::unique_ptr< IFunction > create_fully_connected_layer(FullyConnectedLayerNode &node, GraphContext &ctx)
Create a backend fully connected layer function.
std::unique_ptr< IFunction > create_activation_layer(ActivationLayerNode &node)
Create a backend activation layer function.
DetectionOutput Layer node.
int axis() const
Stack axis parameter accessor.
int32_t shrink_axis_mask() const
std::unique_ptr< IFunction > create_strided_slice_layer(StridedSliceLayerNode &node)
Create a backend strided slice layer function.
unsigned int pooled_height() const
Get the pooled height of the layer.
ROI Pooling Layer Information class.
Class for specifying the size of an image or rectangle.
std::unique_ptr< IFunction > create_quantization_layer(QuantizationLayerNode &node)
Create a backend quantization layer function.
Unary Eltwise Layer node.
std::shared_ptr< IMemoryManager > get_memory_manager(GraphContext &ctx, Target target)
Returns the memory manager for a given target.
std::unique_ptr< IFunction > create_normalize_planar_yuv_layer(NormalizePlanarYUVLayerNode &node)
Create a backend normalize planar YUV layer function.
#define ARM_COMPUTE_LOG_GRAPH_VERBOSE(x)
ConvolutionMethod
Supported Convolution layer methods.
std::unique_ptr< IFunction > create_bounding_box_transform_layer(BoundingBoxTransformLayerNode &node)
Create a backend bounding box transform layer function.
Detection Output layer info.
size_t num_inputs() const
Returns number of inputs of the node.
Generate Proposals Layer node.
TargetInfo::TensorType * get_backing_tensor(arm_compute::graph::Tensor *tensor)
Returns backing tensor of a given tensor.
Deconvolution Layer node.
Fully Connected Layer node.
Depthwise Convolution Layer node.
Winograd based convolution.
ActivationFunction activation() const
Get the type of activation function.
virtual NodeType type() const =0
Returns node's type.
Arithmetic multiplication.
std::shared_ptr< IWeightsManager > get_weights_manager(GraphContext &ctx, Target target)
Returns the weights manager for a given target.
Tensor * input(size_t idx) const
Returns the tensor of a given input of the node.
NodeType type() const override
Returns node's type.
DataLayout layout
Data layout.
std::unique_ptr< IFunction > create_fused_convolution_with_post_op(FusedConvolutionWithPostOpNode &node, GraphContext &ctx)
Create a backend convolution layer function with post operator.
std::unique_ptr< IFunction > create_pooling_layer(PoolingLayerNode &node)
Create a backend pooling layer function.
DataLayout
[DataLayout enum definition]
std::unique_ptr< arm_compute::IFunction > create_concatenate_layer(ConcatenateLayerNode &node)
Create a backend layer concatenate function.
int32_t begin_mask() const
A sequence of PostOps that can be appended to the end of other operators.
ConvertPolicy
Policy to handle integer overflow.
std::unique_ptr< IFunction > create_reshape_layer(ReshapeLayerNode &node)
Create a backend reshape layer function.
size_t get_dimension_idx(DataLayout data_layout, const DataLayoutDimension data_layout_dimension)
Get index of a tensor's given dimension depending on its layout.
std::unique_ptr< IFunction > create_fused_convolution_batch_normalization_with_post_op(FusedConvolutionBatchNormalizationWithPostOpsNode &node, GraphContext &ctx)
Create a backend convolution batch normalization layer function with post operator.
std::unique_ptr< IFunction > create_reorg_layer(ReorgLayerNode &node)
Create a backend reorg layer function.
std::unique_ptr< IFunction > create_channel_shuffle_layer(ChannelShuffleLayerNode &node)
Create a backend channel shuffle layer function.
UnaryEltwiseOperation
Supported Unary Element-wise operations.