#include "google/protobuf/repeated_field.h"
#include <unordered_map>

#include <onnx/onnx.pb.h>
using ModelPtr = std::unique_ptr<onnx::ModelProto>;
using OperationParsingFunction = void(OnnxParserImpl::*)(const onnx::NodeProto& NodeProto);
    using GraphPtr = std::unique_ptr<onnx::GraphProto>;
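
    /// CreateNetworkFrom* overloads that additionally take explicit input shapes,
    /// overriding or supplying the input tensor shapes declared in the model.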
    armnn::INetworkPtr CreateNetworkFromBinary(const std::vector<uint8_t>& binaryContent,
                                               const std::map<std::string, armnn::TensorShape>& inputShapes);
    armnn::INetworkPtr CreateNetworkFromBinaryFile(const char* graphFile,
                                                   const std::map<std::string, armnn::TensorShape>& inputShapes);
    armnn::INetworkPtr CreateNetworkFromTextFile(const char* graphFile,
                                                 const std::map<std::string, armnn::TensorShape>& inputShapes);
    armnn::INetworkPtr CreateNetworkFromString(const std::string& protoText,
                                               const std::map<std::string, armnn::TensorShape>& inputShapes);
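
    // Typical use goes through the public IOnnxParser interface rather than this
    // implementation class directly; a minimal sketch (the model path is illustrative):
    //
    //     armnnOnnxParser::IOnnxParserPtr parser = armnnOnnxParser::IOnnxParser::Create();
    //     armnn::INetworkPtr network = parser->CreateNetworkFromBinaryFile("model.onnx");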

    void SetupInfo(const google::protobuf::RepeatedPtrField<onnx::ValueInfoProto>* list);

    std::vector<armnn::TensorInfo> ComputeOutputInfo(std::vector<std::string> outNames,
                                                     const armnn::IConnectableLayer* layer,
                                                     std::vector<armnn::TensorShape> inputShapes,
                                                     const onnx::TensorProto::DataType& type = onnx::TensorProto::FLOAT);
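
    /// Scans the graph for MatMul followed by Add so the pair can be fused into a single
    /// FullyConnected layer (see AddFullyConnected below).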
    void DetectFullyConnected();
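
    /// Helpers for nodes that mix a regular input with a constant initializer:
    /// GetInputAndParam works out which of the node's two inputs is the data input and which
    /// is the constant; To1DTensor reshapes a constant's TensorInfo down to one dimension.
    /// The Location argument is only used to report where an error was raised.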
    template <typename Location>
    void GetInputAndParam(const onnx::NodeProto& node,
                          std::string* inputName,
                          std::string* constName,
                          const Location& location);

    template <typename Location>
    void To1DTensor(const std::string& name, const Location& location);
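
    /// Broadcast support: insert a Reshape in front of the lower-rank input of a binary
    /// operation so that both inputs carry the same number of dimensions.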
    std::pair<std::string, std::string> AddPrepareBroadcast(const std::string& input0,
                                                            const std::string& input1);
    void PrependForBroadcast(const std::string& outputName,
                             const std::string& input0,
                             const std::string& input1);
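
    /// Emits the FullyConnected layer for a MatMul node, folding in the following Add node
    /// as the bias when one was detected by DetectFullyConnected.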
    void AddFullyConnected(const onnx::NodeProto& matmulNode, const onnx::NodeProto* addNode = nullptr);
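
    /// Create a Constant layer from the initializer stored under tensorName (the Int64 variant
    /// converts the data to int32 first), or a Reshape layer connecting inputName to outputName.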
    void CreateConstantLayer(const std::string& tensorName, const std::string& layerName);
    void CreateInt64ConstantLayer(const std::string& tensorName, const std::string& layerName);
    void CreateReshapeLayer(const std::string& inputName,
                            const std::string& outputName,
                            const std::string& layerName);
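
    /// Operator handlers, one per supported ONNX operator; they are invoked through the
    /// m_ParserFunctions dispatch table declared further down.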
    void ParseClip(const onnx::NodeProto& nodeProto);
    void ParseSigmoid(const onnx::NodeProto& nodeProto);
    void ParseTanh(const onnx::NodeProto& nodeProto);
    void ParseRelu(const onnx::NodeProto& nodeProto);
    void ParseLeakyRelu(const onnx::NodeProto& nodeProto);

    void ParseAdd(const onnx::NodeProto& nodeProto);
    void ParseAveragePool(const onnx::NodeProto& nodeProto);
    void ParseBatchNormalization(const onnx::NodeProto& node);
    void ParseConcat(const onnx::NodeProto& nodeProto);
    void ParseConstant(const onnx::NodeProto& nodeProto);
    void ParseConv(const onnx::NodeProto& nodeProto);
    void ParseFlatten(const onnx::NodeProto& node);
    void ParseGather(const onnx::NodeProto& node);
    void ParseGemm(const onnx::NodeProto& node);
    void ParseGlobalAveragePool(const onnx::NodeProto& node);
    void ParseMaxPool(const onnx::NodeProto& nodeProto);
    void ParseShape(const onnx::NodeProto& node);
    void ParseReshape(const onnx::NodeProto& nodeProto);
    void ParseUnsqueeze(const onnx::NodeProto& nodeProto);

                           const std::string& tensorId,
                           unsigned int slotIndex);
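
    /// Create the ArmNN Input and Output layers for the graph's declared inputs and outputs
    /// and register their tensor slots.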
    void SetupInputLayers();
    void SetupOutputLayers();
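
    /// Package an initializer's data as an armnn::ConstTensor together with its backing
    /// storage; the Int64 variant narrows the values to int32.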
    std::pair<armnn::ConstTensor, std::unique_ptr<float[]>>
    CreateConstTensor(const std::string name,
                      armnn::Optional<armnn::PermutationVector&> permutationVector = armnn::EmptyOptional());

    std::pair<armnn::ConstTensor, std::unique_ptr<int32_t[]>>
    CreateInt64ConstTensor(const std::string name,
                           armnn::Optional<armnn::PermutationVector&> permutationVector = armnn::EmptyOptional());
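
    /// Checks that the data types of the node's input tensors are among those listed in
    /// validInputs, reporting the given location on failure.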
    template <typename TypeList, typename Location>
    void ValidateInputs(const onnx::NodeProto& node,
                        TypeList validInputs,
                        const Location& location);
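
    /// Members of the OnnxTensor helper struct: the (possibly inferred) TensorInfo for a tensor,
    /// plus the constant TensorProto when the tensor is an initializer.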
        std::unique_ptr<armnn::TensorInfo> m_info;
        std::unique_ptr<const onnx::TensorProto> m_tensor;

        OnnxTensor() : m_info(nullptr), m_tensor(nullptr), m_dtype(onnx::TensorProto::FLOAT) { }
        bool isConstant() { return m_tensor != nullptr; }
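
    /// All tensors of the graph (inputs, outputs, initializers and intermediate values), indexed by name.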
    std::unordered_map<std::string, OnnxTensor> m_TensorsInfo;
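
    /// Dispatch table mapping ONNX operator names (e.g. "Conv") to the Parse* member functions above.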
    static const std::map<std::string, OperationParsingFunction> m_ParserFunctions;
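
    /// Members of the TensorSlots helper struct: for one tensor, the output slot that produces it
    /// and the input slots that consume it.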
        std::vector<armnn::IInputSlot*> inputSlots;

        TensorSlots() : outputSlot(nullptr) { }
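
    /// Slot connections for every tensor in the graph, indexed by tensor name.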
    std::unordered_map<std::string, TensorSlots> m_TensorConnections;
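
    /// Maps each tensor name to the node that produces it and that node's index in the graph.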
    std::unordered_map<std::string, std::pair<const onnx::NodeProto*, int>> m_OutputsMap;
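
    /// Members of the UsageSummary helper struct: fusedWithNodes lists the nodes this output has
    /// been fused with; inputForNodes counts how many nodes consume it as an input.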
        std::vector<size_t> fusedWithNodes;
        size_t inputForNodes;

        UsageSummary() : fusedWithNodes({}), inputForNodes(0) { }
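
    /// Usage summary for each node's outputs; consulted when deciding which MatMul/Add pairs
    /// have been fused and whether an output is still needed.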
    std::vector<UsageSummary> m_OutputsFusedAndUsed;
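
    /// Input shapes supplied by the caller to the CreateNetworkFrom* overloads, overriding
    /// the shapes declared in the model.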
    std::map<std::string, armnn::TensorShape> m_InputShapes;
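
    /// TensorInfos of the graph's inputs and outputs, indexed by name.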
    std::unordered_map<std::string, armnn::TensorInfo> m_InputInfos;
    std::unordered_map<std::string, armnn::TensorInfo> m_OutputInfos;