ArmNN
 25.11
Loading...
Searching...
No Matches
ParserPrototxtFixture.hpp
Go to the documentation of this file.
1//
2// Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
3// SPDX-License-Identifier: MIT
4//
5
6#pragma once
7
8#include <armnn/IRuntime.hpp>
9#include <armnnTestUtils/TensorHelpers.hpp>
10
11#include <Network.hpp>
13
14#include <doctest/doctest.h>
15#include <fmt/format.h>
16
17#include <iomanip>
18#include <string>
19
20namespace armnnUtils
21{
22
23template<typename TParser>
25{
28 : m_Parser(TParser::Create())
29 , m_Runtime(armnn::IRuntime::Create(armnn::IRuntime::CreationOptions()))
31 {
32 }
34
35 /// Parses and loads the network defined by the m_Prototext string.
36 /// @{
37 void SetupSingleInputSingleOutput(const std::string& inputName, const std::string& outputName);
38 void SetupSingleInputSingleOutput(const armnn::TensorShape& inputTensorShape,
39 const std::string& inputName,
40 const std::string& outputName);
41 void SetupSingleInputSingleOutput(const armnn::TensorShape& inputTensorShape,
42 const armnn::TensorShape& outputTensorShape,
43 const std::string& inputName,
44 const std::string& outputName);
45 void Setup(const std::map<std::string, armnn::TensorShape>& inputShapes,
46 const std::vector<std::string>& requestedOutputs);
47 void Setup(const std::map<std::string, armnn::TensorShape>& inputShapes);
48 void Setup();
50 const std::map<std::string,armnn::TensorShape>& inputShapes,
51 const std::vector<std::string>& requestedOutputs);
52 /// @}
53
54 /// Executes the network with the given input tensor and checks the result against the given output tensor.
55 /// This overload assumes that the network has a single input and a single output.
56 template <std::size_t NumOutputDimensions>
57 void RunTest(const std::vector<float>& inputData, const std::vector<float>& expectedOutputData);
58
59 /// Executes the network with the given input tensor and checks the result against the given output tensor.
60 /// Calls RunTest with output type of uint8_t for checking comparison operators.
61 template <std::size_t NumOutputDimensions>
62 void RunComparisonTest(const std::map<std::string, std::vector<float>>& inputData,
63 const std::map<std::string, std::vector<uint8_t>>& expectedOutputData);
64
65 /// Executes the network with the given input tensors and checks the results against the given output tensors.
66 /// This overload supports multiple inputs and multiple outputs, identified by name.
67 template <std::size_t NumOutputDimensions, typename T = float>
68 void RunTest(const std::map<std::string, std::vector<float>>& inputData,
69 const std::map<std::string, std::vector<T>>& expectedOutputData);
70
71 std::string m_Prototext;
72 std::unique_ptr<TParser, void(*)(TParser* parser)> m_Parser;
75
76 /// If the single-input-single-output overload of Setup() is called, these will store the input and output name
77 /// so they don't need to be passed to the single-input-single-output overload of RunTest().
78 /// @{
79 std::string m_SingleInputName;
80 std::string m_SingleOutputName;
81 /// @}
82
 83 /// This will store the output shape so it doesn't need to be passed to the single-input-single-output overload
 84 /// of RunTest().
86};
87
88template<typename TParser>
90 const std::string& outputName)
91{
92 // Stores the input and output name so they don't need to be passed to the single-input-single-output RunTest().
93 m_SingleInputName = inputName;
94 m_SingleOutputName = outputName;
95 Setup({ }, { outputName });
96}
97
98template<typename TParser>
100 const std::string& inputName,
101 const std::string& outputName)
102{
103 // Stores the input and output name so they don't need to be passed to the single-input-single-output RunTest().
104 m_SingleInputName = inputName;
105 m_SingleOutputName = outputName;
106 Setup({ { inputName, inputTensorShape } }, { outputName });
107}
108
109template<typename TParser>
111 const armnn::TensorShape& outputTensorShape,
112 const std::string& inputName,
113 const std::string& outputName)
114{
115 // Stores the input name, the output name and the output tensor shape
116 // so they don't need to be passed to the single-input-single-output RunTest().
117 m_SingleInputName = inputName;
118 m_SingleOutputName = outputName;
119 m_SingleOutputShape = outputTensorShape;
120 Setup({ { inputName, inputTensorShape } }, { outputName });
121}
122
123template<typename TParser>
124void ParserPrototxtFixture<TParser>::Setup(const std::map<std::string, armnn::TensorShape>& inputShapes,
125 const std::vector<std::string>& requestedOutputs)
126{
127 std::string errorMessage;
128
129 armnn::INetworkPtr network =
130 m_Parser->CreateNetworkFromString(m_Prototext.c_str(), inputShapes, requestedOutputs);
131 auto optimized = Optimize(*network, { armnn::Compute::CpuRef }, m_Runtime->GetDeviceSpec());
132 armnn::Status ret = m_Runtime->LoadNetwork(m_NetworkIdentifier, std::move(optimized), errorMessage);
133 if (ret != armnn::Status::Success)
134 {
135 throw armnn::Exception(fmt::format("LoadNetwork failed with error: '{0}' {1}",
136 errorMessage,
137 CHECK_LOCATION().AsString()));
138 }
139}
140
141template<typename TParser>
142void ParserPrototxtFixture<TParser>::Setup(const std::map<std::string, armnn::TensorShape>& inputShapes)
143{
144 std::string errorMessage;
146 armnn::INetworkPtr network =
147 m_Parser->CreateNetworkFromString(m_Prototext.c_str(), inputShapes);
148 auto optimized = Optimize(*network, { armnn::Compute::CpuRef }, m_Runtime->GetDeviceSpec());
149 armnn::Status ret = m_Runtime->LoadNetwork(m_NetworkIdentifier, std::move(optimized), errorMessage);
150 if (ret != armnn::Status::Success)
151 {
152 throw armnn::Exception(fmt::format("LoadNetwork failed with error: '{0}' {1}",
153 errorMessage,
154 CHECK_LOCATION().AsString()));
155 }
157}
158
159template<typename TParser>
161{
162 std::string errorMessage;
164 armnn::INetworkPtr network =
165 m_Parser->CreateNetworkFromString(m_Prototext.c_str());
166 auto optimized = Optimize(*network, { armnn::Compute::CpuRef }, m_Runtime->GetDeviceSpec());
167 armnn::Status ret = m_Runtime->LoadNetwork(m_NetworkIdentifier, std::move(optimized), errorMessage);
168 if (ret != armnn::Status::Success)
169 {
170 throw armnn::Exception(fmt::format("LoadNetwork failed with error: '{0}' {1}",
171 errorMessage,
172 CHECK_LOCATION().AsString()));
173 }
175}
176
177template<typename TParser>
179 const std::map<std::string,armnn::TensorShape>& inputShapes,
180 const std::vector<std::string>& requestedOutputs)
181{
182 armnn::INetworkPtr network =
183 m_Parser->CreateNetworkFromString(m_Prototext.c_str(), inputShapes, requestedOutputs);
184 auto optimized = Optimize(*network, { armnn::Compute::CpuRef }, m_Runtime->GetDeviceSpec());
185 return optimized;
186}
187
188template<typename TParser>
189template <std::size_t NumOutputDimensions>
190void ParserPrototxtFixture<TParser>::RunTest(const std::vector<float>& inputData,
191 const std::vector<float>& expectedOutputData)
192{
193 RunTest<NumOutputDimensions>({ { m_SingleInputName, inputData } }, { { m_SingleOutputName, expectedOutputData } });
194}
195
196template<typename TParser>
197template <std::size_t NumOutputDimensions>
198void ParserPrototxtFixture<TParser>::RunComparisonTest(const std::map<std::string, std::vector<float>>& inputData,
199 const std::map<std::string, std::vector<uint8_t>>&
200 expectedOutputData)
201{
202 RunTest<NumOutputDimensions, uint8_t>(inputData, expectedOutputData);
203}
204
205template<typename TParser>
206template <std::size_t NumOutputDimensions, typename T>
207void ParserPrototxtFixture<TParser>::RunTest(const std::map<std::string, std::vector<float>>& inputData,
208 const std::map<std::string, std::vector<T>>& expectedOutputData)
209{
210 // Sets up the armnn input tensors from the given vectors.
211 armnn::InputTensors inputTensors;
212 for (auto&& it : inputData)
213 {
214 armnn::BindingPointInfo bindingInfo = m_Parser->GetNetworkInputBindingInfo(it.first);
215 bindingInfo.second.SetConstant(true);
216 inputTensors.push_back({ bindingInfo.first, armnn::ConstTensor(bindingInfo.second, it.second.data()) });
217 if (bindingInfo.second.GetNumElements() != it.second.size())
218 {
219 throw armnn::Exception(fmt::format("Input tensor {0} is expected to have {1} elements. "
220 "{2} elements supplied. {3}",
221 it.first,
222 bindingInfo.second.GetNumElements(),
223 it.second.size(),
224 CHECK_LOCATION().AsString()));
225 }
226 }
227
228 // Allocates storage for the output tensors to be written to and sets up the armnn output tensors.
229 std::map<std::string, std::vector<T>> outputStorage;
230 armnn::OutputTensors outputTensors;
231 for (auto&& it : expectedOutputData)
232 {
233 armnn::BindingPointInfo bindingInfo = m_Parser->GetNetworkOutputBindingInfo(it.first);
234 outputStorage.emplace(it.first, std::vector<T>(bindingInfo.second.GetNumElements()));
235 outputTensors.push_back(
236 { bindingInfo.first, armnn::Tensor(bindingInfo.second, outputStorage.at(it.first).data()) });
237 }
238
239 m_Runtime->EnqueueWorkload(m_NetworkIdentifier, inputTensors, outputTensors);
240
241 // Compares each output tensor to the expected values.
242 for (auto&& it : expectedOutputData)
243 {
244 armnn::BindingPointInfo bindingInfo = m_Parser->GetNetworkOutputBindingInfo(it.first);
245 if (bindingInfo.second.GetNumElements() != it.second.size())
246 {
247 throw armnn::Exception(fmt::format("Output tensor {0} is expected to have {1} elements. "
248 "{2} elements supplied. {3}",
249 it.first,
250 bindingInfo.second.GetNumElements(),
251 it.second.size(),
252 CHECK_LOCATION().AsString()));
253 }
254
255 // If the expected output shape is set, the output tensor checks will be carried out.
256 if (m_SingleOutputShape.GetNumDimensions() != 0)
257 {
258
259 if (bindingInfo.second.GetShape().GetNumDimensions() == NumOutputDimensions &&
260 bindingInfo.second.GetShape().GetNumDimensions() == m_SingleOutputShape.GetNumDimensions())
261 {
262 for (unsigned int i = 0; i < m_SingleOutputShape.GetNumDimensions(); ++i)
263 {
264 if (m_SingleOutputShape[i] != bindingInfo.second.GetShape()[i])
265 {
266 // This exception message could not be created by fmt:format because of an oddity in
267 // the operator << of TensorShape.
268 std::stringstream message;
269 message << "Output tensor " << it.first << " is expected to have "
270 << bindingInfo.second.GetShape() << "shape. "
271 << m_SingleOutputShape << " shape supplied. "
272 << CHECK_LOCATION().AsString();
273 throw armnn::Exception(message.str());
274 }
275 }
276 }
277 else
278 {
279 throw armnn::Exception(fmt::format("Output tensor {0} is expected to have {1} dimensions. "
280 "{2} dimensions supplied. {3}",
281 it.first,
282 bindingInfo.second.GetShape().GetNumDimensions(),
283 NumOutputDimensions,
284 CHECK_LOCATION().AsString()));
285 }
286 }
287
288 auto outputExpected = it.second;
289 auto shape = bindingInfo.second.GetShape();
290 if (std::is_same<T, uint8_t>::value)
291 {
292 auto result = CompareTensors(outputExpected, outputStorage[it.first], shape, shape, true);
293 CHECK_MESSAGE(result.m_Result, result.m_Message.str());
294 }
295 else
296 {
297 auto result = CompareTensors(outputExpected, outputStorage[it.first], shape, shape);
298 CHECK_MESSAGE(result.m_Result, result.m_Message.str());
299 }
300 }
301}
302
303} // namespace armnnUtils
#define ARMNN_NO_DEPRECATE_WARN_BEGIN
#define ARMNN_NO_DEPRECATE_WARN_END
#define CHECK_LOCATION()
A tensor defined by a TensorInfo (shape and data type) and an immutable backing store.
Definition Tensor.hpp:330
Base class for all ArmNN exceptions so that users can filter to just those.
A tensor defined by a TensorInfo (shape and data type) and a mutable backing store.
Definition Tensor.hpp:322
Copyright (c) 2021 ARM Limited and Contributors.
std::unique_ptr< IRuntime, void(*)(IRuntime *runtime)> IRuntimePtr
Definition IRuntime.hpp:39
std::pair< armnn::LayerBindingId, armnn::TensorInfo > BindingPointInfo
Definition Tensor.hpp:276
std::unique_ptr< IOptimizedNetwork, void(*)(IOptimizedNetwork *network)> IOptimizedNetworkPtr
Definition INetwork.hpp:340
Status
enumeration
Definition Types.hpp:43
std::vector< std::pair< LayerBindingId, class ConstTensor > > InputTensors
Definition Tensor.hpp:394
int NetworkId
Definition IRuntime.hpp:33
IOptimizedNetworkPtr Optimize(const INetwork &network, const std::vector< BackendId > &backendPreferences, const IDeviceSpec &deviceSpec, const OptimizerOptionsOpaque &options=OptimizerOptionsOpaque(), Optional< std::vector< std::string > & > messages=EmptyOptional())
Create an optimized version of the network.
Definition Network.cpp:2287
std::unique_ptr< INetwork, void(*)(INetwork *network)> INetworkPtr
Definition INetwork.hpp:339
@ CpuRef
CPU Execution: Reference C++ kernels.
Definition BackendId.hpp:25
std::vector< std::pair< LayerBindingId, class Tensor > > OutputTensors
Definition Tensor.hpp:395
ARMNN_NO_DEPRECATE_WARN_END void SetupSingleInputSingleOutput(const std::string &inputName, const std::string &outputName)
Parses and loads the network defined by the m_Prototext string.
armnn::TensorShape m_SingleOutputShape
This will store the output shape so it doesn't need to be passed to the single-input-single-output over...
std::string m_SingleInputName
If the single-input-single-output overload of Setup() is called, these will store the input and outpu...
void Setup(const std::map< std::string, armnn::TensorShape > &inputShapes, const std::vector< std::string > &requestedOutputs)
void RunComparisonTest(const std::map< std::string, std::vector< float > > &inputData, const std::map< std::string, std::vector< uint8_t > > &expectedOutputData)
Executes the network with the given input tensor and checks the result against the given output tenso...
armnn::IOptimizedNetworkPtr SetupOptimizedNetwork(const std::map< std::string, armnn::TensorShape > &inputShapes, const std::vector< std::string > &requestedOutputs)
std::unique_ptr< TParser, void(*)(TParser *parser)> m_Parser
ARMNN_NO_DEPRECATE_WARN_BEGIN ParserPrototxtFixture()
void RunTest(const std::vector< float > &inputData, const std::vector< float > &expectedOutputData)
Executes the network with the given input tensor and checks the result against the given output tenso...