#include <fmt/format.h>

#include <google/protobuf/io/zero_copy_stream_impl.h>
#include <google/protobuf/text_format.h>

#include <memory>

using namespace armnn;
28 IOnnxParser::IOnnxParser() : pOnnxParserImpl(new OnnxParserImpl()) {}
30 IOnnxParser::~IOnnxParser() =
default;
54 return pOnnxParserImpl->CreateNetworkFromTextFile(graphFile);
59 return pOnnxParserImpl->CreateNetworkFromString(protoText);
62 BindingPointInfo IOnnxParser::GetNetworkInputBindingInfo(
const std::string& name)
const 64 return pOnnxParserImpl->GetNetworkInputBindingInfo(name);
67 BindingPointInfo IOnnxParser::GetNetworkOutputBindingInfo(
const std::string& name)
const 69 return pOnnxParserImpl->GetNetworkOutputBindingInfo(name);
// Throws ParseException unless the actual ONNX datatype is one of
// 'validInputTypes'.
// NOTE(review): several parameter and body lines are missing from this
// extract (the actual value, node name, throw site); comments below describe
// only what is visible.
74 void CheckValidDataType(std::initializer_list<onnx::TensorProto::DataType> validInputTypes,
// Stringified accepted-type list (supplied by CHECK_VALID_DATATYPE below),
// echoed in the error message.
76 const char* validExpr,
78 std::string tensorName,
// Accept when any entry of the allowed list matches.
81 bool isValid = std::any_of(validInputTypes.begin(),
82 validInputTypes.end(),
// Error text names the offending datatype and the permitted set.
87 fmt::format(
"Datatype {} is not valid for tensor '{}' of node '{}', not in {{{}}}. {}",
88 onnx::TensorProto::DataType_Name(actualValue),
// Convenience macro: forwards the accepted types both as values ({__VA_ARGS__})
// and as text (#__VA_ARGS__). STR_LIST pairs a type list with its spelling.
96 #define CHECK_VALID_DATATYPE(NODE, TENSOR, ACTUAL, ...) \ 97 CheckValidDataType({__VA_ARGS__}, ACTUAL, #__VA_ARGS__, NODE, TENSOR, CHECK_LOCATION()) 99 using StrTypeListPair = std::pair<const char*, std::initializer_list<onnx::TensorProto::DataType>>;
100 #define STR_LIST(...) StrTypeListPair(#__VA_ARGS__, {__VA_ARGS__}) 102 template <
typename Callable>
103 void ReadMandatoryNodeAttributeImpl(
const onnx::NodeProto& node,
104 const std::string& attribName,
105 onnx::AttributeProto::AttributeType expectedType,
108 auto attribs = node.attribute();
110 while (attriNum < node.attribute_size())
112 if (attribs.Get(attriNum).name() == attribName)
114 if (attribs.Get(attriNum).type() == expectedType)
116 callable(attribs.Get(attriNum));
120 throw ParseException(fmt::format(
"Attribute {} of node {} expected to have {} as " 121 "onnx::AttributeProto::AttributeType, but found {} instead {}",
124 onnx::AttributeProto::AttributeType_Name(expectedType),
125 onnx::AttributeProto::AttributeType_Name(attribs.Get(attriNum).type()),
132 if (attriNum == node.attribute_size())
134 throw ParseException(fmt::format(
"Could not find required attribute {} in node {} {}",
139 template <
typename Callable>
140 void ReadOptionalNodeAttributeImpl(
const onnx::NodeProto& node,
141 const std::string& attribName,
142 onnx::AttributeProto::AttributeType expectedType,
145 auto attribs = node.attribute();
146 for (
int attriNum = 0; attriNum < node.attribute_size(); ++attriNum)
148 if (attribs.Get(attriNum).name() == attribName)
150 if (attribs.Get(attriNum).type() == expectedType)
152 callable(attribs.Get(attriNum));
157 fmt::format(
"Attribute {} of node {} expected to have {} as onnx::AttributeProto::AttributeType, " 158 "but found {} instead {}",
161 onnx::AttributeProto::AttributeType_Name(expectedType),
162 onnx::AttributeProto::AttributeType_Name(attribs.Get(attriNum).type()),
// Reads an optional INT attribute from an ONNX node, returning 'defaultValue'
// when the attribute is absent. NOTE(review): the return statement is elided
// from this extract — presumably returns attribValue.
169 int64_t ReadOptionalNodeInt64Attribute(
const onnx::NodeProto& node,
170 const std::string& name,
171 const int64_t defaultValue = 0)
173 int64_t attribValue = defaultValue;
// The shared helper invokes this lambda only if the attribute exists with the
// expected type; the capture writes the parsed value back.
174 ReadOptionalNodeAttributeImpl(node, name, onnx::AttributeProto::INT,
175 [&attribValue](
const onnx::AttributeProto& attrValue)
177 attribValue = attrValue.i();
182 std::vector<uint32_t> ReadMandatoryNodeUint32ListAttribute(
const onnx::NodeProto& node,
183 const std::string& name)
185 std::vector<uint32_t> attriList;
186 ReadMandatoryNodeAttributeImpl(node, name, onnx::AttributeProto::INTS,
187 [&attriList](
const onnx::AttributeProto& attrValue)
189 for (
int attriNum = 0; attriNum < attrValue.ints_size(); ++attriNum)
197 uint32_t ReadOptionalNodeUint32Attribute(
const onnx::NodeProto& node,
198 const std::string& name,
199 const uint32_t defaultVal = 0u)
201 uint32_t attribValue = defaultVal;
202 ReadOptionalNodeAttributeImpl(node, name, onnx::AttributeProto::INT,
203 [&attribValue](
const onnx::AttributeProto& attrValue)
210 std::vector<uint32_t> ReadOptionalNodeUint32ListAttribute(
const onnx::NodeProto& node,
211 const std::string& name)
213 std::vector<uint32_t> attriList;
214 ReadOptionalNodeAttributeImpl(node, name, onnx::AttributeProto::INTS,
215 [&attriList](
const onnx::AttributeProto& attrValue)
217 for (
int attriNum = 0; attriNum < attrValue.ints_size(); ++attriNum)
// Reads an optional FLOAT attribute from an ONNX node, returning
// 'defaultValue' when absent. NOTE(review): return statement elided from this
// extract — presumably returns attribValue.
226 float ReadOptionalNodeFloatAttribute(
const onnx::NodeProto& node,
227 const std::string& name,
228 const float defaultValue = 0.0f)
230 float attribValue = defaultValue;
// Lambda runs only when the attribute is present with the expected type.
231 ReadOptionalNodeAttributeImpl(node, name, onnx::AttributeProto::FLOAT,
232 [&attribValue](
const onnx::AttributeProto& attrValue)
234 attribValue = attrValue.f();
// Reads an optional STRING attribute from an ONNX node; yields "" when the
// attribute is absent. NOTE(review): return statement elided from this
// extract — presumably returns attribValue.
239 std::string ReadOptionalNodeStringAttribute(
const onnx::NodeProto& node,
const std::string& name)
241 std::string attribValue =
"";
// Lambda runs only when the attribute is present with the expected type.
242 ReadOptionalNodeAttributeImpl(node, name, onnx::AttributeProto::STRING,
243 [&attribValue](
const onnx::AttributeProto& attrValue)
245 attribValue = attrValue.s();
255 case onnx::TensorProto::FLOAT:
257 type = DataType::Float32;
260 case onnx::TensorProto::INT32:
261 case onnx::TensorProto::INT64:
263 type = DataType::Signed32;
269 fmt::format(
"'{}' is not a currently supported datatype for tensor {}." 270 " Supported dataTypes are FLOAT, INT32 and INT64. {}",
271 onnx::TensorProto::DataType_Name(static_cast<onnx::TensorProto::DataType>(data_type)),
288 const onnx::TensorShapeProto onnxShape = info.type().tensor_type().shape();
289 std::vector<unsigned int> shapeDims;
290 for (
int i = 0; i < onnxShape.dim_size(); ++i)
295 if (shapeDims.empty())
297 shapeDims.push_back(1);
300 return ToTensorInfo(info.name(), shapeDims, info.type().tensor_type().elem_type());
305 std::vector<unsigned int> shapeDims;
307 for (
auto dim: tensor.dims())
312 if (shapeDims.empty())
314 shapeDims.push_back(1);
317 return ToTensorInfo(tensor.name(), shapeDims, tensor.data_type());
320 std::string TensorInfoAsString(
const TensorInfo& info,
321 const std::string& name,
325 std::stringstream ss;
326 ss <<
"tensor '" << name <<
"' contains " 327 << onnx::TensorProto::DataType_Name(type)
328 <<
" and has shape [";
332 ss << shape[i] <<
", ";
// Computes SAME-style padding for one spatial dimension.
// NOTE(review): the filterSize/stride/dilation parameter lines and the body
// of the trailing 'if' (which assigns the odd leftover pixel to the front or
// back edge) are elided from this extract — confirm against the full source.
338 void CalcPadding(uint32_t inputSize,
342 uint32_t* paddingFront,
343 uint32_t* paddingBack,
// Output extent is ceil(inputSize / stride).
346 uint32_t outputSize = (inputSize + stride - 1) / stride;
// Effective kernel extent once dilation gaps are inserted.
347 uint32_t dilatedSize = filterSize + (dilation - 1) * (filterSize - 1);
// Total extent the strided kernel sweeps; the excess over the input is the
// padding to distribute.
348 uint32_t temp = (outputSize - 1) * stride + dilatedSize;
// Split the excess evenly between the two edges...
349 *paddingFront = (temp - inputSize) / 2;
350 *paddingBack = *paddingFront;
// ...and when it is odd, one edge (elided below) receives the extra pixel.
351 if((temp - inputSize) % 2 == 1)
366 const std::string& outName)
368 std::vector<int> targetDims;
374 targetDims.push_back(static_cast<int>(inShape[static_cast<uint>(i)]));
378 targetDims.push_back(val);
382 std::vector<unsigned int> outDims(targetDims.begin(), targetDims.end());
383 const auto stretchDim = std::find(targetDims.begin(), targetDims.end(), -1);
384 if (stretchDim != targetDims.end())
386 if (std::find(std::next(stretchDim), targetDims.end(), -1) != targetDims.end())
388 std::stringstream ss;
390 for(uint i = 0; i < targetDims.size() - 1; ++i)
392 ss << targetDims[i] <<
", ";
394 ss << targetDims[targetDims.size() - 1] <<
" ]";
397 fmt::format(
"Error during creation of reshaped tensor '{}'. At most one component of shape can be " 398 " -1 and here, shape is {} {}",
404 auto targetNumElements =
armnn::numeric_cast<
unsigned int>(std::accumulate(targetDims.begin(), targetDims.end(),
405 -1, std::multiplies<int32_t>()));
406 auto stretchIndex =
static_cast<size_t>(std::distance(targetDims.begin(), stretchDim));
407 outDims[stretchIndex] = inShape.
GetNumElements() / targetNumElements;
410 return TensorInfo(outShape, DataType::Float32);
415 const std::map<std::string, OnnxParserImpl::OperationParsingFunction> OnnxParserImpl::m_ParserFunctions = {
416 {
"BatchNormalization", &OnnxParserImpl::ParseBatchNormalization},
417 {
"GlobalAveragePool", &OnnxParserImpl::ParseGlobalAveragePool},
418 {
"AveragePool", &OnnxParserImpl::ParseAveragePool },
419 {
"Clip", &OnnxParserImpl::ParseClip },
420 {
"Constant", &OnnxParserImpl::ParseConstant },
421 {
"MaxPool", &OnnxParserImpl::ParseMaxPool },
422 {
"Reshape", &OnnxParserImpl::ParseReshape },
423 {
"Sigmoid", &OnnxParserImpl::ParseSigmoid },
424 {
"Tanh", &OnnxParserImpl::ParseTanh },
425 {
"Relu", &OnnxParserImpl::ParseRelu },
426 {
"LeakyRelu", &OnnxParserImpl::ParseLeakyRelu },
427 {
"Conv", &OnnxParserImpl::ParseConv },
428 {
"Add", &OnnxParserImpl::ParseAdd },
429 {
"Flatten", &OnnxParserImpl::ParseFlatten},
432 template<
typename TypePair,
typename Location>
433 void OnnxParserImpl::ValidateInputs(
const onnx::NodeProto& node,
434 TypePair validInputs,
435 const Location& location)
437 for(
auto input : node.input())
439 CheckValidDataType(validInputs.second,
440 m_TensorsInfo[input].m_dtype,
448 #define VALID_INPUTS(NODE, VALID_INPUTS) \ 449 OnnxParserImpl::ValidateInputs(NODE, \ 453 std::vector<TensorInfo> OnnxParserImpl::ComputeOutputInfo(std::vector<std::string> outNames,
455 std::vector<TensorShape> inputShapes)
458 bool needCompute = std::any_of(outNames.begin(),
460 [
this](std::string name)
462 return (m_TensorsInfo.count(name) == 0 || m_TensorsInfo[name].m_info ==
nullptr);
464 std::vector<TensorInfo> outInfo;
466 std::vector<TensorShape> inferredShapes;
472 for (uint i = 0; i < outNames.size(); ++i)
476 m_TensorsInfo[outNames[i]] = OnnxTensor();
477 m_TensorsInfo[outNames[i]].m_info = std::make_unique<TensorInfo>(
478 TensorInfo(inferredShapes[i], DataType::Float32));
480 outInfo.push_back(*m_TensorsInfo[outNames[i]].m_info);
// Constructs the parser with no network yet. The two-argument (nullptr,
// nullptr) init suggests m_Network is a smart pointer with a custom deleter
// (INetworkPtr) — a real network is created later in CreateNetworkFromModel.
485 OnnxParserImpl::OnnxParserImpl()
486 : m_Network(nullptr, nullptr)
490 void OnnxParserImpl::ResetParser()
496 void OnnxParserImpl::Cleanup()
498 m_TensorConnections.clear();
499 m_TensorsInfo.clear();
500 m_OutputsMap.clear();
501 m_OutputsFusedAndUsed.clear();
505 std::pair<armnn::ConstTensor, std::unique_ptr<T[]>>
510 ARMNN_ASSERT_MSG(bufferPtr !=
nullptr, fmt::format(
"Buffer for permutation is null").c_str());
518 reinterpret_cast<const T*
>(bufferPtr), data.get(),
sizeof(T));
522 ::memcpy(data.get(), bufferPtr, tensorInfo.
GetNumBytes());
525 return std::make_pair(
ConstTensor(tensorInfo, data.get()), std::move(data));
528 std::pair<ConstTensor, std::unique_ptr<float[]>>
529 OnnxParserImpl::CreateConstTensor(
const std::string name,
532 TensorInfo tensorInfo = *m_TensorsInfo[name].m_info;
533 onnx::TensorProto onnxTensor = *m_TensorsInfo[name].m_tensor;
541 throw ParseException(fmt::format(
"No tensor data found for Const tensor '{}' {}",
546 auto srcData = onnxTensor.float_data().data();
548 if (!onnxTensor.has_raw_data())
550 if(tensorInfo.
GetNumElements() !=
static_cast<uint
>(onnxTensor.float_data_size()))
553 fmt::format(
"The number of data provided ({}) does not match the tensor '{}' number of " 555 onnxTensor.float_data_size(),
560 return CreateConstTensorImpl<float>(srcData, tensorInfo, permutationVector);
564 return CreateConstTensorImpl<float>(
reinterpret_cast<const float*
>(onnxTensor.raw_data().c_str()),
572 FILE* fd = fopen(graphFile,
"r");
580 ModelPtr modelProto = std::make_unique<onnx::ModelProto>();
581 using google::protobuf::io::FileInputStream;
582 std::unique_ptr<FileInputStream> input = std::make_unique<FileInputStream>(fileno(fd));
583 bool success = google::protobuf::TextFormat::Parse(input.get(), modelProto.get());
588 std::stringstream
error;
589 error <<
"Failed to parse graph file";
599 return CreateNetworkFromModel(*modelProto);
605 FILE* fd = fopen(graphFile,
"rb");
613 ModelPtr modelProto = std::make_unique<onnx::ModelProto>();
615 google::protobuf::io::FileInputStream inStream(fileno(fd));
616 google::protobuf::io::CodedInputStream codedStream(&inStream);
617 codedStream.SetTotalBytesLimit(INT_MAX);
618 bool success = modelProto.get()->ParseFromCodedStream(&codedStream);
623 std::stringstream
error;
624 error <<
"Failed to parse graph file";
635 return CreateNetworkFromModel(*modelProto);
646 ModelPtr modelProto = std::make_unique<onnx::ModelProto>();
647 bool success = google::protobuf::TextFormat::ParseFromString(protoText, modelProto.get());
650 std::stringstream
error;
651 error <<
"Failed to parse graph file";
661 return CreateNetworkFromModel(*modelProto);
664 INetworkPtr OnnxParserImpl::CreateNetworkFromModel(onnx::ModelProto& model)
666 m_Network = INetwork::Create();
669 m_Graph = std::make_unique<onnx::GraphProto>(*model.mutable_graph());
678 return std::move(m_Network);
681 void OnnxParserImpl::LoadGraph()
686 SetupInfo(m_Graph->mutable_output());
687 SetupInfo(m_Graph->mutable_input());
688 SetupInfo(m_Graph->mutable_value_info());
690 for (
auto tensor : m_Graph->initializer())
692 m_TensorsInfo[tensor.name()].m_tensor = std::make_unique<const onnx::TensorProto>(tensor);
693 m_TensorsInfo[tensor.name()].m_info = std::make_unique<TensorInfo>(
ToTensorInfo(tensor));
694 m_TensorsInfo[tensor.name()].m_dtype =
702 DetectFullyConnected();
705 for(
size_t nodeIndex = 0; nodeIndex < static_cast<size_t>(m_Graph->node_size()); nodeIndex++)
707 auto node = m_Graph->node(static_cast<int>(nodeIndex));
708 const std::string& operation = node.op_type();
711 if (operation ==
"MatMul" )
713 if(m_OutputsFusedAndUsed[nodeIndex].inputForNodes != m_OutputsFusedAndUsed[nodeIndex].fusedWithNodes.size())
716 AddFullyConnected(node);
719 else if (!(m_OutputsFusedAndUsed[nodeIndex].fusedWithNodes.empty()) && operation ==
"Add")
721 int matmulIndex =
static_cast<int> (m_OutputsFusedAndUsed[nodeIndex].fusedWithNodes[0]);
722 AddFullyConnected(m_Graph->node(matmulIndex), &node);
724 else if (m_OutputsFusedAndUsed[nodeIndex].fusedWithNodes.empty())
726 auto it = m_ParserFunctions.find(operation);
727 if (it != m_ParserFunctions.end())
729 auto func = it->second;
734 throw ParseException(fmt::format(
"Unsupported operation {} for node '{}' {}",
743 for (
const auto& tensorCon : m_TensorConnections)
745 if (tensorCon.second.outputSlot !=
nullptr)
747 for (
size_t inputSlotIdx = 0; inputSlotIdx < tensorCon.second.inputSlots.size(); ++inputSlotIdx)
749 tensorCon.second.outputSlot->Connect(*(tensorCon.second.inputSlots[inputSlotIdx]));
755 void OnnxParserImpl::SetupInfo(
const google::protobuf::RepeatedPtrField<onnx::ValueInfoProto >* list)
757 for (
auto tensor : *list)
759 m_TensorsInfo[tensor.name()] = OnnxTensor();
760 m_TensorsInfo[tensor.name()].m_info = std::make_unique<TensorInfo>(
ToTensorInfo(tensor));
761 m_TensorsInfo[tensor.name()].m_dtype =
766 void OnnxParserImpl::DetectFullyConnected()
768 m_OutputsFusedAndUsed = std::vector<UsageSummary> (
static_cast<size_t>(m_Graph->node_size()), UsageSummary());
769 auto matmulAndConstant = [&](
const std::string& constInput,
770 const std::string& matmulInput,
773 auto matmulIt = m_OutputsMap.find(matmulInput);
774 if(matmulIt != m_OutputsMap.end() && matmulIt->second.first->op_type() ==
"MatMul" 775 && m_TensorsInfo[constInput].isConstant())
777 nodeIndex = matmulIt->second.second;
783 for(
int nodeIndex = 0; nodeIndex < m_Graph->node_size(); nodeIndex++)
785 const onnx::NodeProto* node = &m_Graph->node(nodeIndex);
786 for (
const std::string& output : node->output())
788 m_OutputsMap[output] = std::make_pair(node, nodeIndex);
791 for (
const std::string& input : node->input())
793 auto matmulIt = m_OutputsMap.find(input);
794 if(matmulIt != m_OutputsMap.end()){
795 ++m_OutputsFusedAndUsed[
static_cast<size_t>(matmulIt->second.second)].inputForNodes;
799 if (node->op_type() ==
"Add")
802 if (matmulAndConstant(node->input(0), node->input(1), matmulIndex) ||
803 matmulAndConstant(node->input(1), node->input(0), matmulIndex))
806 m_OutputsFusedAndUsed[
static_cast<size_t>(matmulIndex)].fusedWithNodes
807 .push_back(static_cast<size_t>(nodeIndex));
809 m_OutputsFusedAndUsed[
static_cast<size_t>(nodeIndex)].fusedWithNodes
810 .push_back(static_cast<size_t>(matmulIndex));
815 for (
auto output: m_Graph->output()) {
816 auto matmulIt = m_OutputsMap.find(output.name());
817 if(matmulIt != m_OutputsMap.end()){
818 ++m_OutputsFusedAndUsed[
static_cast<size_t>(matmulIt->second.second)].inputForNodes;
823 template<
typename Location>
824 void OnnxParserImpl::GetInputAndParam(
const onnx::NodeProto& node,
825 std::string* inputName,
826 std::string* constName,
827 const Location& location)
830 if (m_TensorsInfo[node.input(0)].isConstant())
834 else if (m_TensorsInfo[node.input(1)].isConstant())
840 throw ParseException(fmt::format(
"One of the input tensors ('{}' or '{}') should be constant in node '{}' {}",
844 location.AsString()));
848 *constName = node.input(cstIndex);
852 *inputName = node.input(!cstIndex);
856 template<
typename Location>
857 void OnnxParserImpl::To1DTensor(
const std::string& name,
const Location& location)
859 TensorShape shape = m_TensorsInfo[name].m_info->GetShape();
860 std::vector<uint32_t> newShape;
866 fmt::format(
"Only tensors with shape [1, ..., 1, X] can be converted to 1D and {} {}",
867 TensorInfoAsString(*m_TensorsInfo[name].m_info, name, m_TensorsInfo[name].m_dtype),
868 location.AsString()));
873 m_TensorsInfo[name].m_info->SetShape(
TensorShape(static_cast<unsigned int>(newShape.size()), newShape.data()));
876 void OnnxParserImpl::AddConvLayerWithDepthwiseConv(
const onnx::NodeProto& node,
const Convolution2dDescriptor& convDesc)
893 auto weightTensor = CreateConstTensor(node.input(1), perVec);
895 if (node.input_size() == 3)
897 if(!m_TensorsInfo[node.input(2)].isConstant())
899 throw ParseException(fmt::format(
"Bias '{}' should be constant in Conv layer '{}' {}",
904 desc.m_BiasEnabled =
true;
905 auto biasTensor = CreateConstTensor(node.input(2));
906 layer = m_Network->AddDepthwiseConvolution2dLayer(desc,
909 node.name().c_str());
913 layer = m_Network->AddDepthwiseConvolution2dLayer(desc,
916 node.name().c_str());
920 auto outputInfo = ComputeOutputInfo({ node.output(0) }, layer,
921 { m_TensorsInfo[node.input(0)].m_info->GetShape(),
922 weightTensor.first.GetInfo().GetShape() });
928 RegisterInputSlots(layer, {node.input(0)});
931 RegisterOutputSlots(layer, {node.output(0)});
934 void OnnxParserImpl::AddFullyConnected(
const onnx::NodeProto& matmulNode,
const onnx::NodeProto* addNode)
938 std::string weightName;
939 std::string inputName;
944 GetInputAndParam(matmulNode, &inputName, &weightName,
CHECK_LOCATION());
953 std::string biasName;
962 TensorInfo weightInfo = *m_TensorsInfo[weightName].m_info;
963 TensorInfo biasInfo = *m_TensorsInfo[biasName].m_info;
968 fmt::format(
"Shape of weights '{}' and bias of following Add node '{}' do not match : {}" 969 " and {} ( /!\\ bias should be a 1D tensor) {}",
972 TensorInfoAsString(*m_TensorsInfo[weightName].m_info, weightName,
973 m_TensorsInfo[weightName].m_dtype),
974 TensorInfoAsString(*m_TensorsInfo[biasName].m_info, biasName,
975 m_TensorsInfo[biasName].m_dtype ),
980 layer = m_Network->AddFullyConnectedLayer(desc, matmulNode.name().c_str());
983 auto outputInfo = ComputeOutputInfo({addNode->output(0)}, layer,
984 {m_TensorsInfo[inputName].m_info->GetShape(),
985 m_TensorsInfo[weightName].m_info->GetShape()});
989 if(m_TensorsInfo[weightName].isConstant())
991 IConnectableLayer* weightsLayer = m_Network->AddConstantLayer(CreateConstTensor(weightName).first);
998 if(m_TensorsInfo[biasName].isConstant())
1000 IConnectableLayer* biasLayer = m_Network->AddConstantLayer(CreateConstTensor(biasName).first);
1007 RegisterInputSlots(layer, {inputName, weightName, biasName});
1008 RegisterOutputSlots(layer, {addNode->output(0)});
1012 layer = m_Network->AddFullyConnectedLayer(desc, matmulNode.name().c_str());
1015 auto outputInfo = ComputeOutputInfo({matmulNode.output(0)}, layer,
1016 {m_TensorsInfo[inputName].m_info->GetShape(),
1017 m_TensorsInfo[weightName].m_info->GetShape()});
1021 if(m_TensorsInfo[weightName].isConstant())
1023 TensorInfo weightInfo = *m_TensorsInfo[weightName].m_info;
1024 IConnectableLayer* weightsLayer = m_Network->AddConstantLayer(CreateConstTensor(weightName).first);
1031 RegisterInputSlots(layer, {inputName, weightName});
1032 RegisterOutputSlots(layer, {matmulNode.output(0)});
1036 void OnnxParserImpl::AddPoolingLayer(
const onnx::NodeProto& node,
Pooling2dDescriptor& desc)
1044 std::vector<uint32_t> kernel_shape = ReadMandatoryNodeUint32ListAttribute(node,
"kernel_shape");
1045 std::vector<uint32_t> strides = ReadOptionalNodeUint32ListAttribute(node,
"strides");
1046 std::vector<uint32_t> pads = ReadOptionalNodeUint32ListAttribute(node,
"pads");
1067 std::string paddingString = ReadOptionalNodeStringAttribute(node,
"auto_pad");
1068 if(paddingString !=
"VALID" && paddingString !=
"" && paddingString !=
"NOTSET")
1071 if( paddingString ==
"SAME_LOWER")
1075 else if (paddingString ==
"SAME_UPPER")
1081 throw ParseException(fmt::format(
"Invalid auto_pad attribute for node {}. " 1082 "Only SAME_UPPER, SAME_LOWER or VALID supported and found {} {}",
1087 auto inputInfo = *m_TensorsInfo[node.input(0)].m_info;
1088 uint32_t inputHeight = inputInfo.GetShape()[2];
1089 uint32_t inputWidth = inputInfo.GetShape()[3];
1090 CalcPadding(inputHeight,
1097 CalcPadding(inputWidth,
1114 IConnectableLayer* layer = m_Network->AddPooling2dLayer(desc, node.name().c_str());
1117 auto outputInfo = ComputeOutputInfo({node.output(0)}, layer, {m_TensorsInfo[node.input(0)].m_info->GetShape()});
1122 RegisterInputSlots(layer, {node.input(0)});
1125 RegisterOutputSlots(layer, {node.output(0)});
1128 std::pair<std::string, std::string> OnnxParserImpl::AddPrepareBroadcast(
const std::string& input0,
1129 const std::string& input1)
1131 std::pair<std::string, std::string> inputs = std::make_pair(input0, input1);
1133 TensorShape input0Shape = m_TensorsInfo[input0].m_info->GetShape();
1134 TensorShape input1Shape = m_TensorsInfo[input1].m_info->GetShape();
1138 auto outputName = fmt::format(
"reshape_output_{}", input1);
1139 PrependForBroadcast(outputName, input1, input0);
1140 inputs.second = outputName;
1144 auto outputName = fmt::format(
"reshape_output_{}", input0);
1145 PrependForBroadcast(outputName, input0, input1);
1146 inputs.first = outputName;
// Materialises a constant tensor as an armnn Constant layer and registers its
// output slot under the tensor's name. NOTE(review): the line setting the
// layer's output TensorInfo appears to be elided from this extract.
1151 void OnnxParserImpl::CreateConstantLayer(
const std::string& tensorName,
const std::string& layerName)
// CreateConstTensor returns {ConstTensor, owning buffer}; .first is the view.
1153 auto armnnTensor = CreateConstTensor(tensorName);
1155 IConnectableLayer* layer = m_Network->AddConstantLayer(armnnTensor.first, layerName.c_str());
1157 RegisterOutputSlots(layer, {tensorName});
1160 void OnnxParserImpl::CreateReshapeLayer(
const std::string& inputName,
1161 const std::string& outputName,
1162 const std::string& layerName)
1164 const TensorInfo outputTensorInfo = *m_TensorsInfo[outputName].m_info;
1168 IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, layerName.c_str());
1174 RegisterInputSlots(layer, {inputName});
1177 RegisterOutputSlots(layer, {outputName});
1190 if (func == ActivationFunction::BoundedReLu)
1192 desc.
m_A = node.input(2).empty() ? std::numeric_limits<float>::max() : std::stof(node.input(2));
1193 desc.
m_B = node.input(1).empty() ? std::numeric_limits<float>::lowest() : std::stof(node.input(1));
1196 IConnectableLayer*
const layer = m_Network->AddActivationLayer(desc, node.name().c_str());
1199 auto outputInfo = ComputeOutputInfo({ node.output(0)}, layer, {m_TensorsInfo[node.input(0)].m_info->GetShape()});
1200 layer->GetOutputSlot(0).SetTensorInfo(outputInfo[0]);
1204 RegisterInputSlots(layer, {node.input(0)});
1207 RegisterOutputSlots(layer, {node.output(0)});
1210 void OnnxParserImpl::ParseClip(
const onnx::NodeProto& node)
1212 ParseActivation(node, ActivationFunction::BoundedReLu);
1215 void OnnxParserImpl::ParseSigmoid(
const onnx::NodeProto& node)
1217 ParseActivation(node, ActivationFunction::Sigmoid);
1220 void OnnxParserImpl::ParseTanh(
const onnx::NodeProto& node)
1222 ParseActivation(node, ActivationFunction::TanH);
1225 void OnnxParserImpl::ParseRelu(
const onnx::NodeProto& node)
1227 ParseActivation(node, ActivationFunction::ReLu);
1230 void OnnxParserImpl::ParseLeakyRelu(
const onnx::NodeProto& node)
1232 ParseActivation(node, ActivationFunction::LeakyReLu);
1235 void OnnxParserImpl::ParseAdd(
const onnx::NodeProto& node)
1246 auto inputs = AddPrepareBroadcast(node.input(0), node.input(1));
1247 auto input0 = *m_TensorsInfo[inputs.first].m_info;
1248 auto input1 = *m_TensorsInfo[inputs.second].m_info;
1249 ARMNN_ASSERT(input0.GetNumDimensions() == input1.GetNumDimensions());
1251 unsigned int numDims = input0.GetNumDimensions();
1252 for (
unsigned int i = 0; i < numDims; i++)
1254 unsigned int dim0 = input0.GetShape()[i];
1255 unsigned int dim1 = input1.GetShape()[i];
1256 if (dim0 != dim1 && dim0 != 1 && dim1 != 1)
1259 fmt::format(
"Broadcast is only supported for scalar or 1D tensors in Add node '{}'. " 1260 "Input dimensions should either match or one should be of size 1 and here, " 1263 TensorInfoAsString(*m_TensorsInfo[inputs.first].m_info, inputs.first,
1264 m_TensorsInfo[inputs.first].m_dtype),
1265 TensorInfoAsString(*m_TensorsInfo[inputs.second].m_info, inputs.second,
1266 m_TensorsInfo[inputs.second].m_dtype),
1275 auto outputInfo = ComputeOutputInfo({ node.output(0) }, layer,
1276 { m_TensorsInfo[inputs.first].m_info->GetShape(),
1277 m_TensorsInfo[inputs.second].m_info->GetShape() });
1281 if(m_TensorsInfo[inputs.first].isConstant()) {
1282 CreateConstantLayer(inputs.first, fmt::format(
"Add:constant_of_{}", node.input(0)));
1284 if(m_TensorsInfo[inputs.second].isConstant()) {
1285 CreateConstantLayer(inputs.second, fmt::format(
"Add:constant_of_{}", node.input(1)));
1287 RegisterInputSlots(layer, {inputs.first, inputs.second});
1290 RegisterOutputSlots(layer, {node.output(0)});
1293 void OnnxParserImpl::ParseAveragePool(
const onnx::NodeProto& node)
1298 uint32_t count_include_pad = 0;
1299 count_include_pad = ReadOptionalNodeUint32Attribute(node,
"count_include_pad");
1300 if(count_include_pad) {
1303 AddPoolingLayer(node, desc);
1306 void OnnxParserImpl::ParseBatchNormalization(
const onnx::NodeProto& node)
1314 for(
int ind = 1; ind < node.input_size(); ++ind)
1316 auto tensor = node.input(ind);
1317 if(! m_TensorsInfo[tensor].isConstant())
1320 fmt::format(
"Input tensor '{}' should be constant in BatchNormalization node '{}' {}",
1327 float epsilon = ReadOptionalNodeFloatAttribute(node,
"epsilon", 1e-5f);
1329 desc.
m_Eps = epsilon;
1331 auto scaleTensor = CreateConstTensor(node.input(1));
1332 auto biasTensor = CreateConstTensor(node.input(2));
1333 auto meanTensor = CreateConstTensor(node.input(3));
1334 auto varTensor = CreateConstTensor(node.input(4));
1341 node.name().c_str());
1344 auto outputInfo = ComputeOutputInfo({node.output(0)}, layer, {m_TensorsInfo[node.input(0)].m_info->GetShape()});
1345 layer->GetOutputSlot(0).SetTensorInfo(outputInfo[0]);
1347 RegisterInputSlots(layer, {node.input(0)});
1350 RegisterOutputSlots(layer, {node.output(0)});
1353 void OnnxParserImpl::ParseConstant(
const onnx::NodeProto& node)
1356 if (!node.attribute(0).has_t())
1358 throw ParseException(fmt::format(
"Value not found for Constant node '{}' {}",
1362 const onnx::TensorProto& onnxTensor = node.attribute(0).t();
1369 m_TensorsInfo[node.output(0)].m_tensor = std::make_unique<const onnx::TensorProto>(onnxTensor);
1370 m_TensorsInfo[node.output(0)].m_info = std::make_unique<TensorInfo>(
ToTensorInfo(onnxTensor));
1373 CreateConstantLayer(node.output(0), node.name());
1376 void OnnxParserImpl::ParseConv(
const onnx::NodeProto& node)
1383 if(m_TensorsInfo[node.input(0)].m_info->GetNumDimensions() != 4)
1386 fmt::format(
"ArmNN only supports 2D convolution and Conv layer '{}' input {} {}",
1388 TensorInfoAsString(*m_TensorsInfo[node.input(0)].m_info, node.input(0),
1389 m_TensorsInfo[node.input(0)].m_dtype),
1393 if(!m_TensorsInfo[node.input(1)].isConstant())
1396 fmt::format(
"Weights '{}' should be constant in Conv layer '{}' {}",
1402 auto inputInfo = *m_TensorsInfo[node.input(0)].m_info;
1407 std::vector<uint32_t> strides = ReadOptionalNodeUint32ListAttribute(node,
"strides");
1419 std::vector<uint32_t> dilations = ReadOptionalNodeUint32ListAttribute(node,
"dilations");
1420 if(!dilations.empty())
1426 std::vector<uint32_t> pads = ReadOptionalNodeUint32ListAttribute(node,
"pads");
1431 std::string paddingString = ReadOptionalNodeStringAttribute(node,
"auto_pad");
1432 if(paddingString !=
"VALID" && paddingString !=
"" && paddingString !=
"NOTSET")
1435 if( paddingString ==
"SAME_LOWER")
1439 else if (paddingString ==
"SAME_UPPER")
1446 fmt::format(
"Invalid auto_pad attribute for node {}. Only SAME_UPPER, SAME_LOWER or VALID " 1447 "supported and found {} {}",
1452 uint32_t inputHeight = inputInfo.GetShape()[2];
1453 uint32_t inputWidth = inputInfo.GetShape()[3];
1455 uint32_t weightHeight;
1456 uint32_t weightWidth;
1457 std::vector<uint32_t> kernel_shape = ReadOptionalNodeUint32ListAttribute(node,
"kernel_shape");
1458 if (kernel_shape.empty())
1460 const TensorInfo weightTensorInfo = *m_TensorsInfo[node.input(1)].m_info;
1461 weightHeight = weightTensorInfo.
GetShape()[2];
1462 weightWidth = weightTensorInfo.
GetShape()[3];
1466 weightHeight = kernel_shape[0];
1467 weightWidth = kernel_shape[1];
1469 CalcPadding(inputHeight,
1476 CalcPadding(inputWidth,
1493 uint32_t group = ReadOptionalNodeUint32Attribute(node,
"group", 1);
1496 if (group > inputInfo.GetShape()[1])
1499 fmt::format(
"Error parsing Convolution node: {}. " 1500 "The 'group'={} parameter cannot be larger than the " 1501 "channel of the input shape={} (in NCHW format). {}",
1504 inputInfo.GetShape()[1],
1507 else if (group == inputInfo.GetShape()[1])
1511 AddConvLayerWithDepthwiseConv(node, desc);
1518 throw ParseException(fmt::format(
"Error parsing Convolution node: {}. " 1519 "The 'group'={} parameter should be 1 or be equal to the " 1520 "channel of the input shape={} (in NCHW format). {}",
1523 inputInfo.GetShape()[1],
1529 auto weightTensor = CreateConstTensor(node.input(1));
1531 if (node.input_size() == 3)
1533 if(!m_TensorsInfo[node.input(2)].isConstant())
1535 throw ParseException(fmt::format(
"Bias '{}' should be constant in Conv layer '{}' {}",
1541 auto biasTensor = CreateConstTensor(node.input(2));
1542 layer = m_Network->AddConvolution2dLayer(desc,
1545 node.name().c_str());
1549 layer = m_Network->AddConvolution2dLayer(desc,
1552 node.name().c_str());
1556 auto outputInfo = ComputeOutputInfo({ node.output(0) }, layer,
1557 { m_TensorsInfo[node.input(0)].m_info->GetShape(),
1558 m_TensorsInfo[node.input(1)].m_info->GetShape() });
1563 RegisterInputSlots(layer, {node.input(0)});
1566 RegisterOutputSlots(layer, {node.output(0)});
1569 void OnnxParserImpl::ParseFlatten(
const onnx::NodeProto& node)
1575 m_TensorsInfo[node.input(0)].m_dtype,
1576 onnx::TensorProto::FLOAT);
1578 int64_t axis = ReadOptionalNodeInt64Attribute(node,
"axis", 1);
1579 TensorShape inputShape = m_TensorsInfo[node.input(0)].m_info->GetShape();
1590 throw ParseException(fmt::format(
"Axis '{}' invalid. Tensor has '{}' dimensions in FlattenLayer '{}'",
1600 for (i = 0; i < axis; i++){
1601 dimension1 *= inputShape[i];
1606 dimension2 *= inputShape[i];
1611 auto outInfo = ComputeReshapeInfo(outputShape, inputShape, node.output(0));
1612 m_TensorsInfo[node.output(0)].m_info = std::make_unique<TensorInfo>(outInfo);
1613 CreateReshapeLayer(node.input(0), node.output(0), node.name());
1616 void OnnxParserImpl::ParseGlobalAveragePool(
const onnx::NodeProto& node)
1622 TensorShape inputShape = m_TensorsInfo[node.input(0)].m_info->GetShape();
1626 IConnectableLayer* layer = m_Network->AddPooling2dLayer(desc, node.name().c_str());
1629 auto outputInfo = ComputeOutputInfo({node.output(0)}, layer, {inputShape});
1634 RegisterInputSlots(layer, {node.input(0)});
1637 RegisterOutputSlots(layer, {node.output(0)});
// Lowers ONNX MaxPool via the shared pooling path. NOTE(review): the lines
// declaring and configuring the Pooling2dDescriptor 'desc' (pool type etc.)
// are elided from this extract.
1640 void OnnxParserImpl::ParseMaxPool(
const onnx::NodeProto& node)
1645 AddPoolingLayer(node, desc);
1648 void OnnxParserImpl::ParseReshape(
const onnx::NodeProto& node)
1654 m_TensorsInfo[node.input(0)].m_dtype,
1655 onnx::TensorProto::FLOAT);
1657 m_TensorsInfo[node.input(1)].m_dtype,
1658 onnx::TensorProto::INT64);
1660 if(!m_TensorsInfo[node.input(1)].isConstant())
1662 throw ParseException(fmt::format(
"Shape '{}' should be constant in Reshape layer '{}' {}",
1668 if(m_TensorsInfo[node.input(0)].isConstant())
1671 if(m_TensorsInfo.count(node.output(0)) == 0)
1673 m_TensorsInfo[node.output(0)] = OnnxTensor();
1675 m_TensorsInfo[node.output(0)].m_tensor =
1676 std::make_unique<onnx::TensorProto>(*m_TensorsInfo[node.input(0)].m_tensor);
1680 TensorShape inputShape = m_TensorsInfo[node.input(0)].m_info->GetShape();
1682 if(m_TensorsInfo.count(node.output(0)) == 0 || m_TensorsInfo[node.output(0)].m_info ==
nullptr)
1684 uint64_t dims =
static_cast<uint64_t
>(m_TensorsInfo[node.input(1)].m_tensor->int64_data_size());
1685 TensorShape targetShape{
static_cast<unsigned int>(dims), 1};
1687 for(uint i = 0; i < dims; i++)
1689 int val =
CHECKED_INT32(m_TensorsInfo[node.input(1)].m_tensor->int64_data(static_cast<int>(i)));
1690 targetShape[i]=
static_cast<unsigned int>(val);
1693 auto outInfo = ComputeReshapeInfo(targetShape, inputShape, node.output(0));
1694 m_TensorsInfo[node.output(0)].m_info = std::make_unique<TensorInfo>(outInfo);
1697 CreateReshapeLayer(node.input(0), node.output(0), node.name());
1701 void OnnxParserImpl::PrependForBroadcast(
const std::string& outputName,
1702 const std::string& input0,
1703 const std::string& input1)
1708 TensorShape input0Shape = m_TensorsInfo[input0].m_info->GetShape();
1709 TensorShape input1Shape = m_TensorsInfo[input1].m_info->GetShape();
1712 std::vector<uint32_t> newShape;
1715 newShape.push_back(1);
1720 newShape.push_back(input0Shape[dim]);
1722 outputTensorInfo.
SetShape(
TensorShape(static_cast<unsigned int>(newShape.size()), newShape.data()));
1725 m_TensorsInfo[outputName] = OnnxTensor();
1726 m_TensorsInfo[outputName].m_info = std::make_unique<TensorInfo>(outputTensorInfo);
1729 if( ! m_TensorsInfo[input0].isConstant())
1731 CreateReshapeLayer(input0, outputName, fmt::format(
"Add:reshapeOf{}", input0));
1735 m_TensorsInfo[outputName].m_tensor = std::make_unique<onnx::TensorProto>(*m_TensorsInfo[input0].m_tensor);
1740 void OnnxParserImpl::SetupInputLayers()
1743 for(
int inputIndex = 0; inputIndex < m_Graph->input_size(); ++inputIndex)
1745 auto input = m_Graph->input(inputIndex);
1746 if (! m_TensorsInfo[input.name()].isConstant())
1749 m_Network->AddInputLayer(static_cast<armnn::LayerBindingId>(inputIndex), input.name().c_str());
1753 RegisterOutputSlots(layer,{ input.name() });
1758 void OnnxParserImpl::SetupOutputLayers()
1760 if(m_Graph->output_size() == 0)
1765 for(
int outputIndex = 0; outputIndex < m_Graph->output_size(); ++outputIndex)
1768 m_Network->AddOutputLayer(static_cast<armnn::LayerBindingId>(outputIndex),
1769 m_Graph->output(outputIndex).name().c_str());
1771 RegisterInputSlots(layer, { m_Graph->output(outputIndex).name() });
1775 void OnnxParserImpl::RegisterInputSlots(
IConnectableLayer* layer,
const std::vector<std::string>& tensorIds)
1781 fmt::format(
"The number of tensor inputs ({}) does not match the number expected ({}) {}",
1787 for (
unsigned int slotIndex = 0; slotIndex < layer->
GetNumInputSlots(); ++slotIndex)
1789 std::string tensorId = tensorIds[slotIndex];
1792 auto it = m_TensorConnections.find(tensorId);
1794 if (it == m_TensorConnections.end())
1797 m_TensorConnections[tensorId] = TensorSlots();
1799 m_TensorConnections[tensorId].inputSlots.push_back(slot);
1803 void OnnxParserImpl::RegisterOutputSlots(
IConnectableLayer* layer,
const std::vector<std::string>& tensorIds)
1809 fmt::format(
"The number of tensor outputs ({}) does not match the number expected ({}) {} ",
1815 for (
unsigned int slotIndex = 0; slotIndex < layer->
GetNumOutputSlots(); ++slotIndex)
1817 std::string tensorId = tensorIds[slotIndex];
1820 auto it = m_TensorConnections.find(tensorId);
1822 if (it == m_TensorConnections.end())
1825 m_TensorConnections[tensorId] = TensorSlots();
1828 TensorSlots& tensorSlots = m_TensorConnections[tensorId];
1831 if (tensorSlots.outputSlot !=
nullptr)
1833 throw ParseException(fmt::format(
"Another layer has already registered itself as the producer of " 1838 tensorSlots.outputSlot = slot;
1844 for(
int i = 0; i < m_Graph->input_size(); ++i)
1846 auto input = m_Graph->input(i);
1847 if(input.name() == name)
1849 return std::make_pair(static_cast<armnn::LayerBindingId>(i),
ToTensorInfo(input));
1858 for(
int i = 0; i < m_Graph->output_size(); ++i)
1860 auto output = m_Graph->output(i);
1861 if(output.name() == name)
1863 return std::make_pair(static_cast<armnn::LayerBindingId>(i),
ToTensorInfo(output));
1872 if(model ==
nullptr) {
1877 std::vector<std::string> inputNames;
1878 std::map<std::string, bool> isConstant;
1879 for(
auto tensor : model->graph().initializer())
1881 isConstant[tensor.name()] =
true;
1883 for(
auto input : model->graph().input())
1885 auto it = isConstant.find(input.name());
1886 if(it == isConstant.end())
1888 inputNames.push_back(input.name());
1896 if(model ==
nullptr) {
1901 std::vector<std::string> outputNames;
1902 for(
auto output : model->graph().output())
1904 outputNames.push_back(output.name());
unsigned int GetNumElements() const
Function that calculates the tensor elements by multiplying all dimension sizes which are specified...
uint32_t m_PadBottom
Padding bottom value in the height dimension.
bool m_BiasEnabled
Enable/disable bias.
virtual unsigned int GetNumOutputSlots() const =0
Returns the number of connectable output slots.
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
uint32_t m_PadBottom
Padding bottom value in the height dimension.
virtual unsigned int GetNumInputSlots() const =0
Returns the number of connectable input slots.
const TensorShape & GetShape() const
uint32_t m_PadLeft
Padding left value in the width dimension.
std::string AsString() const
A ReshapeDescriptor for the ReshapeLayer.
BindingPointInfo GetNetworkInputBindingInfo(const std::string &name) const
Retrieve binding info (layer id and tensor info) for the network input identified by the given layer ...
uint32_t m_PoolWidth
Pooling width value.
A Convolution2dDescriptor for the Convolution2dLayer.
uint32_t m_PadLeft
Padding left value in the width dimension.
unsigned int GetNumBytes() const
float m_Eps
Value to add to the variance. Used to avoid dividing by zero.
PaddingMethod m_PaddingMethod
The padding method to be used. (Exclude, IgnoreValue).
BindingPointInfo GetNetworkOutputBindingInfo(const std::string &name) const
Retrieve binding info (layer id and tensor info) for the network output identified by the given layer...
uint32_t m_PadTop
Padding top value in the height dimension.
uint32_t m_PadRight
Padding right value in the width dimension.
#define VALID_INPUTS(NODE, VALID_INPUTS)
Copyright (c) 2021 ARM Limited and Contributors.
uint32_t m_DilationY
Dilation along y axis.
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
armnn::INetworkPtr CreateNetworkFromBinaryFile(const char *graphFile)
Create the network from a protobuf binary file on disk.
virtual void SetTensorInfo(const TensorInfo &tensorInfo)=0
static const std::string GetVersion()
Retrieve version in X.Y.Z form.
static std::vector< std::string > GetInputs(ModelPtr &model)
Retrieve inputs names.
void SetShape(const TensorShape &newShape)
TensorShape m_TargetShape
Target shape value.
uint32_t m_PoolHeight
Pooling height value.
uint32_t m_PadTop
Padding top value in the height dimension.
void Permute(const armnn::TensorShape &dstShape, const armnn::PermutationVector &mappings, const void *src, void *dst, size_t dataTypeSize)
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
std::unique_ptr< onnx::ModelProto > ModelPtr
armnn::INetworkPtr CreateNetworkFromTextFile(const char *graphFile)
Create the network from a protobuf text file on disk.
static std::vector< std::string > GetOutputs(ModelPtr &model)
Retrieve outputs names.
uint32_t m_PadRight
Padding right value in the width dimension.
#define ARMNN_ASSERT_MSG(COND, MSG)
An output connection slot for a layer.
armnn::INetworkPtr CreateNetworkFromBinaryFile(const char *graphFile)
Create the network from a protobuf binary file on disk.
bool has_value() const noexcept
A FullyConnectedDescriptor for the FullyConnectedLayer.
bool m_BiasEnabled
Enable/disable bias.
A tensor defined by a TensorInfo (shape and data type) and an immutable backing store.
#define CHECK_VALID_SIZE(ACTUAL,...)
#define CHECKED_NON_NEGATIVE(VALUE)
static ModelPtr LoadModelFromString(const std::string &inputString)
#define ARMNN_ASSERT(COND)
An ActivationDescriptor for the ActivationLayer.
static ModelPtr LoadModelFromTextFile(const char *fileName)
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
float m_A
Alpha upper bound value used by the activation functions. (BoundedReLu, Linear, TanH, Elu).
uint32_t m_DilationX
Dilation along x axis.
std::pair< armnn::ConstTensor, std::unique_ptr< T[]> > CreateConstTensorImpl(const T *bufferPtr, armnn::TensorInfo &tensorInfo, const armnn::Optional< armnn::PermutationVector &> permutationVector)
EmptyOptional is used to initialize the Optional class in case we want to have default value for an O...
PoolingAlgorithm m_PoolType
The pooling algorithm to use (Max, Average, L2).
unsigned int GetNumDimensions() const
Function that returns the tensor rank.
OutputShapeRounding m_OutputShapeRounding
The rounding method for the output shape. (Floor, Ceiling).
virtual const IInputSlot & GetInputSlot(unsigned int index) const =0
Get a const input slot handle by slot index.
void SetConstant(const bool IsConstant=true)
Marks the data corresponding to this tensor info as constant.
std::enable_if_t< std::is_unsigned< Source >::value &&std::is_unsigned< Dest >::value, Dest > numeric_cast(Source source)
armnn::TensorInfo ToTensorInfo(TensorRawPtr tensorPtr)
virtual const IOutputSlot & GetOutputSlot(unsigned int index) const =0
Get the const output slot handle by slot index.
std::unique_ptr< INetwork, void(*)(INetwork *network)> INetworkPtr
virtual int Connect(IInputSlot &destination)=0
#define CHECK_VALID_DATATYPE(NODE, TENSOR, ACTUAL,...)
virtual std::vector< TensorShape > InferOutputShapes(const std::vector< TensorShape > &inputShapes) const =0
Infer the shape of the output(s) based on the provided input shape(s)
armnn::BindingPointInfo BindingPointInfo
armnn::INetworkPtr CreateNetworkFromString(const std::string &protoText)
Create the network directly from protobuf text in a string. Useful for debugging/testing.
#define CHECKED_INT32(VALUE)
A Pooling2dDescriptor for the Pooling2dLayer.
#define ONNX_PARSER_VERSION
ONNX_PARSER_VERSION: "X.Y.Z" where: X = Major version number Y = Minor version number Z = Patch versi...
std::unique_ptr< IOnnxParser, void(*)(IOnnxParser *parser)> IOnnxParserPtr
static ModelPtr LoadModelFromBinaryFile(const char *fileName)
float m_B
Beta lower bound value used by the activation functions. (BoundedReLu, Linear, TanH).
armnn::TensorShape Permuted(const armnn::TensorShape &srcShape, const armnn::PermutationVector &mappings)
ActivationFunction m_Function
The activation function to use (Sigmoid, TanH, Linear, ReLu, BoundedReLu, SoftReLu, LeakyReLu, Abs, Sqrt, Square, Elu).
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
A DepthwiseConvolution2dDescriptor for the DepthwiseConvolution2dLayer.
A BatchNormalizationDescriptor for the BatchNormalizationLayer.
uint32_t m_PadLeft
Padding left value in the width dimension.
unsigned int GetNumElements() const