16 #include "../TfLiteParser.hpp" 22 #include <fmt/format.h> 23 #include <doctest/doctest.h> 25 #include "flatbuffers/idl.h" 26 #include "flatbuffers/util.h" 27 #include "flatbuffers/flexbuffers.h" 29 #include <schema_generated.h> 43 ITfLiteParser::TfLiteParserOptions options;
44 options.m_StandInLayerForUnsupported =
true;
45 options.m_InferAndValidate =
true;
47 m_Parser = std::make_unique<armnnTfLiteParser::TfLiteParserImpl>(
57 std::unique_ptr<armnnTfLiteParser::TfLiteParserImpl>
m_Parser;
64 void Setup(
bool testDynamic =
true)
66 m_TestDynamic = testDynamic;
77 const uint8_t* binaryContent = graphBinary.data();
78 const size_t len = graphBinary.size();
79 if (binaryContent ==
nullptr)
84 flatbuffers::Verifier verifier(binaryContent, len);
85 if (verifier.VerifyBuffer<tflite::Model>() ==
false)
88 "flatbuffers format. size:{} {}",
92 auto model = tflite::UnPackModel(binaryContent);
94 for (
auto const& subgraph : model->subgraphs)
96 std::vector<int32_t> inputIds = subgraph->inputs;
97 for (
unsigned int tensorIndex = 0; tensorIndex < subgraph->tensors.size(); ++tensorIndex)
99 if (std::find(inputIds.begin(), inputIds.end(), tensorIndex) != inputIds.end())
103 for (
auto const& tensor : subgraph->tensors)
105 if (tensor->shape_signature.size() != 0)
110 for (
unsigned int i = 0; i < tensor->shape.size(); ++i)
112 tensor->shape_signature.push_back(-1);
129 : m_Parser->CreateNetworkFromBinary(m_GraphBinary);
136 m_Runtime->GetDeviceSpec());
137 std::string errorMessage;
139 armnn::Status ret = m_Runtime->LoadNetwork(networkId, move(optimized), errorMessage);
144 fmt::format(
"The runtime failed to load the network. " 145 "Error was: {}. in {} [{}:{}]",
156 m_SingleInputName = inputName;
157 m_SingleOutputName = outputName;
166 flatbuffers::Parser parser;
168 bool ok = parser.Parse(schemafile.c_str());
171 ok &= parser.Parse(m_JsonString.c_str());
180 const uint8_t * bufferPtr = parser.builder_.GetBufferPointer();
181 size_t size =
static_cast<size_t>(parser.builder_.GetSize());
182 m_GraphBinary.assign(bufferPtr, bufferPtr+size);
189 template <std::size_t NumOutputDimensions,
191 void RunTest(
size_t subgraphId,
197 template <std::size_t NumOutputDimensions,
199 void RunTest(
size_t subgraphId,
207 template <std::size_t NumOutputDimensions,
210 void RunTest(
size_t subgraphId,
213 bool isDynamic =
false);
219 template <std::size_t NumOutputDimensions,
223 void RunTest(
size_t subgraphId,
234 void RunTest(std::size_t subgraphId,
241 flexbuffers::Builder detectPostProcess;
242 detectPostProcess.Map([&]() {
247 detectPostProcess.Int(
"num_classes", descriptor.
m_NumClasses);
250 detectPostProcess.Float(
"h_scale", descriptor.
m_ScaleH);
251 detectPostProcess.Float(
"w_scale", descriptor.
m_ScaleW);
252 detectPostProcess.Float(
"x_scale", descriptor.
m_ScaleX);
253 detectPostProcess.Float(
"y_scale", descriptor.
m_ScaleY);
255 detectPostProcess.Finish();
258 std::stringstream strStream;
259 std::vector<uint8_t> buffer = detectPostProcess.GetBuffer();
260 std::copy(buffer.begin(), buffer.end(),std::ostream_iterator<int>(strStream,
","));
262 return strStream.str();
266 tflite::TensorType tensorType, uint32_t buffer,
const std::string& name,
267 const std::vector<float>& min,
const std::vector<float>& max,
268 const std::vector<float>& scale,
const std::vector<int64_t>& zeroPoint)
271 CHECK_EQ(shapeSize, tensors->shape.size());
272 CHECK(std::equal(shape.begin(), shape.end(), tensors->shape.begin(), tensors->shape.end()));
273 CHECK_EQ(tensorType, tensors->type);
274 CHECK_EQ(buffer, tensors->buffer);
275 CHECK_EQ(name, tensors->name);
276 CHECK(tensors->quantization);
277 CHECK(std::equal(min.begin(), min.end(), tensors->quantization.get()->min.begin(),
278 tensors->quantization.get()->min.end()));
279 CHECK(std::equal(max.begin(), max.end(), tensors->quantization.get()->max.begin(),
280 tensors->quantization.get()->max.end()));
281 CHECK(std::equal(scale.begin(), scale.end(), tensors->quantization.get()->scale.begin(),
282 tensors->quantization.get()->scale.end()));
283 CHECK(std::equal(zeroPoint.begin(), zeroPoint.end(),
284 tensors->quantization.get()->zero_point.begin(),
285 tensors->quantization.get()->zero_point.end()));
290 template <armnn::DataType dataType>
297 template <armnn::DataType dataType>
298 void ParserFlatbuffersFixture::FillInputTensors(
303 for (
auto&& it : inputData)
307 inputTensors.push_back({ bindingInfo.first,
armnn::ConstTensor(bindingInfo.second, it.second.data()) });
314 template <std::size_t NumOutputDimensions,
320 RunTest<NumOutputDimensions, armnnType>(subgraphId,
328 template <std::size_t NumOutputDimensions,
334 RunTest<NumOutputDimensions, armnnType, armnnType>(subgraphId, inputData, expectedOutputData);
341 template <std::size_t NumOutputDimensions,
353 FillInputTensors<armnnType1>(inputTensors, inputData, subgraphId);
356 std::map<std::string, std::vector<DataType2>> outputStorage;
358 for (
auto&& it : expectedOutputData)
365 CHECK_MESSAGE((outputNumDimensions == NumOutputDimensions),
366 fmt::format(
"Number of dimensions expected {}, but got {} for output layer {}",
372 outputStorage.emplace(it.first, std::vector<DataType2>(outputTensorInfo.
GetNumElements()));
373 outputTensors.push_back(
374 { outputBindingId,
armnn::Tensor(outputTensorInfo, outputStorage.at(it.first).data()) });
380 for (
auto&& it : expectedOutputData)
383 auto outputExpected = it.second;
384 if (std::is_same<DataType2, uint8_t>::value)
386 auto result =
CompareTensors(outputExpected, outputStorage[it.first],
387 bindingInfo.second.GetShape(), bindingInfo.second.GetShape(),
389 CHECK_MESSAGE(result.m_Result, result.m_Message.str());
393 auto result =
CompareTensors(outputExpected, outputStorage[it.first],
394 bindingInfo.second.GetShape(), bindingInfo.second.GetShape(),
396 CHECK_MESSAGE(result.m_Result, result.m_Message.str());
405 for (
auto&& it : expectedOutputData)
408 auto outputExpected = it.second;
409 auto result =
CompareTensors(outputExpected, outputStorage[it.first],
410 bindingInfo.second.GetShape(), bindingInfo.second.GetShape(),
412 CHECK_MESSAGE(result.m_Result, result.m_Message.str());
431 FillInputTensors<armnnType1>(inputTensors, inputData, subgraphId);
434 outputTensors.reserve(expectedOutputData.size());
435 std::map<std::string, std::vector<DataType2>> outputStorage;
436 for (
auto&& it : expectedOutputData)
441 std::vector<DataType2> out(it.second.size());
442 outputStorage.emplace(it.first, out);
443 outputTensors.push_back({ bindingInfo.first,
445 outputStorage.at(it.first).data()) });
451 for (
auto&& it : expectedOutputData)
453 std::vector<armnn::ResolveType<armnnType2>> out = outputStorage.at(it.first);
455 for (
unsigned int i = 0; i < out.size(); ++i)
457 CHECK(doctest::Approx(it.second[i]).epsilon(0.000001f) == out[i]);
467 template <std::size_t NumOutputDimensions,
480 FillInputTensors<inputType1>(inputTensors, input1Data, subgraphId);
481 FillInputTensors<inputType2>(inputTensors, input2Data, subgraphId);
484 std::map<std::string, std::vector<DataType2>> outputStorage;
486 for (
auto&& it : expectedOutputData)
493 CHECK_MESSAGE((outputNumDimensions == NumOutputDimensions),
494 fmt::format(
"Number of dimensions expected {}, but got {} for output layer {}",
500 outputStorage.emplace(it.first, std::vector<DataType2>(outputTensorInfo.
GetNumElements()));
501 outputTensors.push_back(
502 { outputBindingId,
armnn::Tensor(outputTensorInfo, outputStorage.at(it.first).data()) });
508 for (
auto&& it : expectedOutputData)
511 auto outputExpected = it.second;
512 if (std::is_same<DataType2, uint8_t>::value)
514 auto result =
CompareTensors(outputExpected, outputStorage[it.first],
515 bindingInfo.second.GetShape(), bindingInfo.second.GetShape(),
true);
516 CHECK_MESSAGE(result.m_Result, result.m_Message.str());
520 auto result =
CompareTensors(outputExpected, outputStorage[it.first],
521 bindingInfo.second.GetShape(), bindingInfo.second.GetShape());
522 CHECK_MESSAGE(result.m_Result, result.m_Message.str());
armnn::NetworkId m_NetworkIdentifier
float m_ScaleW
Center size encoding scale width.
static std::string GenerateDetectionPostProcessJsonString(const armnn::DetectionPostProcessDescriptor &descriptor)
CPU Execution: Reference C++ kernels.
void loadNetwork(armnn::NetworkId networkId, bool loadDynamic)
float m_ScaleX
Center size encoding scale x.
ParserFlatbuffersFixture()
void CheckTensors(const TensorRawPtr &tensors, size_t shapeSize, const std::vector< int32_t > &shape, tflite::TensorType tensorType, uint32_t buffer, const std::string &name, const std::vector< float > &min, const std::vector< float > &max, const std::vector< float > &scale, const std::vector< int64_t > &zeroPoint)
std::unique_ptr< IRuntime, void(*)(IRuntime *runtime)> IRuntimePtr
uint32_t m_DetectionsPerClass
Detections per classes, used in Regular NMS.
typename ResolveTypeImpl< DT >::Type ResolveType
bool ReadStringToBinary()
std::vector< std::pair< LayerBindingId, class ConstTensor > > InputTensors
armnn::PredicateResult CompareTensors(const std::vector< T > &actualData, const std::vector< T > &expectedData, const armnn::TensorShape &actualShape, const armnn::TensorShape &expectedShape, bool compareBoolean=false, bool isDynamic=false)
std::unique_ptr< ITfLiteParser, void(*)(ITfLiteParser *parser)> ITfLiteParserPtr
Copyright (c) 2021 ARM Limited and Contributors.
void RunTest(size_t subgraphId, const std::vector< armnn::ResolveType< ArmnnType >> &inputData, const std::vector< armnn::ResolveType< ArmnnType >> &expectedOutputData)
Executes the network with the given input tensor and checks the result against the given output tensor.
int LayerBindingId
Type of identifiers for bindable layers (inputs, outputs).
uint32_t m_MaxClassesPerDetection
Maximum numbers of classes per detection, used in Fast NMS.
A tensor defined by a TensorInfo (shape and data type) and a mutable backing store.
uint32_t m_MaxDetections
Maximum numbers of detections.
std::unique_ptr< armnnTfLiteParser::TfLiteParserImpl > m_Parser
float m_NmsIouThreshold
Intersection over union threshold.
IOptimizedNetworkPtr Optimize(const INetwork &network, const std::vector< BackendId > &backendPreferences, const IDeviceSpec &deviceSpec, const OptimizerOptions &options=OptimizerOptions(), Optional< std::vector< std::string > &> messages=EmptyOptional())
Create an optimized version of the network.
#define ARMNN_ASSERT_MSG(COND, MSG)
armnnSerializer::TensorInfo * TensorRawPtr
A tensor defined by a TensorInfo (shape and data type) and an immutable backing store.
std::vector< std::pair< LayerBindingId, class Tensor > > OutputTensors
uint32_t m_NumClasses
Number of classes.
bool m_UseRegularNms
Use Regular NMS.
std::string m_SingleOutputName
std::vector< uint8_t > m_GraphBinary
armnn::IRuntimePtr m_Runtime
float m_ScaleH
Center size encoding scale height.
std::pair< armnn::LayerBindingId, armnn::TensorInfo > BindingPointInfo
void SetupSingleInputSingleOutput(const std::string &inputName, const std::string &outputName)
armnn::NetworkId m_DynamicNetworkIdentifier
Base class for all ArmNN exceptions so that users can filter to just those.
void Setup(bool testDynamic=true)
void VerifyTensorInfoDataType(const armnn::TensorInfo &info, armnn::DataType dataType)
float m_ScaleY
Center size encoding scale y.
unsigned char g_TfLiteSchemaText[]
float m_NmsScoreThreshold
NMS score threshold.
std::unique_ptr< INetwork, void(*)(INetwork *network)> INetworkPtr
std::unique_ptr< tflite::ModelT > MakeModelDynamic(std::vector< uint8_t > graphBinary)
std::string m_SingleInputName
If the single-input-single-output overload of Setup() is called, these will store the input and output names so they do not need to be passed to each subsequent call.
unsigned int GetNumDimensions() const
unsigned int g_TfLiteSchemaText_len
unsigned int GetNumElements() const