ArmNN
 25.11
Loading...
Searching...
No Matches
TfLiteParserImpl Class Reference

#include <TfLiteParser.hpp>

Public Types

using ModelPtr = std::unique_ptr<tflite::ModelT>
using SubgraphPtr = std::unique_ptr<tflite::SubGraphT>
using OperatorPtr = std::unique_ptr<tflite::OperatorT>
using OperatorCodePtr = std::unique_ptr<tflite::OperatorCodeT>
using TensorPtr = std::unique_ptr<tflite::TensorT>
using TensorRawPtr = const tflite::TensorT *
using TensorRawPtrVector = std::vector<TensorRawPtr>
using TensorIdRawPtr = std::pair<size_t, TensorRawPtr>
using TensorIdRawPtrVector = std::vector<TensorIdRawPtr>
using BufferPtr = std::unique_ptr<tflite::BufferT>
using BufferRawPtr = const tflite::BufferT *

Public Member Functions

armnn::INetworkPtr CreateNetworkFromBinaryFile (const char *graphFile)
 Create the network from a flatbuffers binary file on disk.
armnn::INetworkPtr CreateNetworkFromBinary (const std::vector< uint8_t > &binaryContent)
 Create the network from a flatbuffers binary.
BindingPointInfo GetNetworkInputBindingInfo (size_t subgraphId, const std::string &name) const
 Retrieve binding info (layer id and tensor info) for the network input identified by the given layer name and subgraph id.
BindingPointInfo GetNetworkOutputBindingInfo (size_t subgraphId, const std::string &name) const
 Retrieve binding info (layer id and tensor info) for the network output identified by the given layer name and subgraph id.
size_t GetSubgraphCount () const
 Return the number of subgraphs in the parsed model.
std::vector< std::string > GetSubgraphInputTensorNames (size_t subgraphId) const
 Return the input tensor names for a given subgraph.
std::vector< std::string > GetSubgraphOutputTensorNames (size_t subgraphId) const
 Return the output tensor names for a given subgraph.
 TfLiteParserImpl (const armnn::Optional< ITfLiteParser::TfLiteParserOptions > &options=armnn::EmptyOptional())
 ~TfLiteParserImpl ()=default
armnn::INetworkPtr CreateNetworkFromBinaryAsDynamic (const std::vector< uint8_t > &binaryContent)
armnn::INetworkPtr LoadModel (std::unique_ptr< tflite::ModelT > model)

Static Public Member Functions

static ModelPtr LoadModelFromFile (const char *fileName)
static ModelPtr LoadModelFromBinary (const uint8_t *binaryContent, size_t len)
static TensorRawPtrVector GetInputs (const ModelPtr &model, size_t subgraphIndex, size_t operatorIndex)
static TensorRawPtrVector GetOutputs (const ModelPtr &model, size_t subgraphIndex, size_t operatorIndex)
static TensorIdRawPtrVector GetSubgraphInputs (const ModelPtr &model, size_t subgraphIndex)
static TensorIdRawPtrVector GetSubgraphOutputs (const ModelPtr &model, size_t subgraphIndex)
static std::vector< int32_t > & GetInputTensorIds (const ModelPtr &model, size_t subgraphIndex, size_t operatorIndex)
static std::vector< int32_t > & GetOutputTensorIds (const ModelPtr &model, size_t subgraphIndex, size_t operatorIndex)
static BufferRawPtr GetBuffer (const ModelPtr &model, size_t bufferIndex)
static armnn::TensorInfo OutputShapeOfSqueeze (std::vector< uint32_t > squeezeDims, const armnn::TensorInfo &inputTensorInfo)
static armnn::TensorInfo OutputShapeOfReshape (const armnn::TensorInfo &inputTensorInfo, const std::vector< int32_t > &targetDimsIn)
static const std::string GetVersion ()
 Retrieve version in X.Y.Z form.

Detailed Description

Definition at line 26 of file TfLiteParser.hpp.

Member Typedef Documentation

◆ BufferPtr

using BufferPtr = std::unique_ptr<tflite::BufferT>

Definition at line 39 of file TfLiteParser.hpp.

◆ BufferRawPtr

using BufferRawPtr = const tflite::BufferT *

Definition at line 40 of file TfLiteParser.hpp.

◆ ModelPtr

using ModelPtr = std::unique_ptr<tflite::ModelT>

Definition at line 30 of file TfLiteParser.hpp.

◆ OperatorCodePtr

using OperatorCodePtr = std::unique_ptr<tflite::OperatorCodeT>

Definition at line 33 of file TfLiteParser.hpp.

◆ OperatorPtr

using OperatorPtr = std::unique_ptr<tflite::OperatorT>

Definition at line 32 of file TfLiteParser.hpp.

◆ SubgraphPtr

using SubgraphPtr = std::unique_ptr<tflite::SubGraphT>

Definition at line 31 of file TfLiteParser.hpp.

◆ TensorIdRawPtr

using TensorIdRawPtr = std::pair<size_t, TensorRawPtr>

Definition at line 37 of file TfLiteParser.hpp.

◆ TensorIdRawPtrVector

using TensorIdRawPtrVector = std::vector<TensorIdRawPtr>

Definition at line 38 of file TfLiteParser.hpp.

◆ TensorPtr

using TensorPtr = std::unique_ptr<tflite::TensorT>

Definition at line 34 of file TfLiteParser.hpp.

◆ TensorRawPtr

using TensorRawPtr = const tflite::TensorT *

Definition at line 35 of file TfLiteParser.hpp.

◆ TensorRawPtrVector

using TensorRawPtrVector = std::vector<TensorRawPtr>

Definition at line 36 of file TfLiteParser.hpp.

Constructor & Destructor Documentation

◆ TfLiteParserImpl()

TfLiteParserImpl ( const armnn::Optional< ITfLiteParser::TfLiteParserOptions > & options = armnn::EmptyOptional() )

Definition at line 771 of file TfLiteParser.cpp.

772 : m_Options(options)
773, m_Network(nullptr, nullptr)
774, m_ParserFunctions(tflite::BuiltinOperator_MAX+1, &TfLiteParserImpl::ParseUnsupportedOperator)
775{
776 // register supported operators
777 m_ParserFunctions[tflite::BuiltinOperator_ABS] = &TfLiteParserImpl::ParseAbs;
778 m_ParserFunctions[tflite::BuiltinOperator_ADD] = &TfLiteParserImpl::ParseAdd;
779 m_ParserFunctions[tflite::BuiltinOperator_ARG_MIN] = &TfLiteParserImpl::ParseArgMin;
780 m_ParserFunctions[tflite::BuiltinOperator_ARG_MAX] = &TfLiteParserImpl::ParseArgMax;
781 m_ParserFunctions[tflite::BuiltinOperator_AVERAGE_POOL_2D] = &TfLiteParserImpl::ParseAveragePool2D;
782 m_ParserFunctions[tflite::BuiltinOperator_BATCH_TO_SPACE_ND] = &TfLiteParserImpl::ParseBatchToSpaceND;
783 m_ParserFunctions[tflite::BuiltinOperator_BATCH_MATMUL] = &TfLiteParserImpl::ParseBatchMatMul;
784 m_ParserFunctions[tflite::BuiltinOperator_BROADCAST_TO] = &TfLiteParserImpl::ParseBroadcastTo;
785 m_ParserFunctions[tflite::BuiltinOperator_CEIL] = &TfLiteParserImpl::ParseCeil;
786 m_ParserFunctions[tflite::BuiltinOperator_CAST] = &TfLiteParserImpl::ParseCast;
787 m_ParserFunctions[tflite::BuiltinOperator_CONCATENATION] = &TfLiteParserImpl::ParseConcatenation;
788 m_ParserFunctions[tflite::BuiltinOperator_CONV_2D] = &TfLiteParserImpl::ParseConv2D;
789 // Conv3D support was added in TF 2.5, so for backwards compatibility a hash define is needed.
790 #if defined(ARMNN_POST_TFLITE_2_4)
791 m_ParserFunctions[tflite::BuiltinOperator_CONV_3D] = &TfLiteParserImpl::ParseConv3D;
792 #endif
793 m_ParserFunctions[tflite::BuiltinOperator_CUSTOM] = &TfLiteParserImpl::ParseCustomOperator;
794 m_ParserFunctions[tflite::BuiltinOperator_DEPTH_TO_SPACE] = &TfLiteParserImpl::ParseDepthToSpace;
795 m_ParserFunctions[tflite::BuiltinOperator_DEPTHWISE_CONV_2D] = &TfLiteParserImpl::ParseDepthwiseConv2D;
796 m_ParserFunctions[tflite::BuiltinOperator_DEQUANTIZE] = &TfLiteParserImpl::ParseDequantize;
797 m_ParserFunctions[tflite::BuiltinOperator_DIV] = &TfLiteParserImpl::ParseDiv;
798 m_ParserFunctions[tflite::BuiltinOperator_ELU] = &TfLiteParserImpl::ParseElu;
799 m_ParserFunctions[tflite::BuiltinOperator_EQUAL] = &TfLiteParserImpl::ParseEqual;
800 m_ParserFunctions[tflite::BuiltinOperator_EXP] = &TfLiteParserImpl::ParseExp;
801 m_ParserFunctions[tflite::BuiltinOperator_EXPAND_DIMS] = &TfLiteParserImpl::ParseExpandDims;
802 m_ParserFunctions[tflite::BuiltinOperator_FLOOR_DIV] = &TfLiteParserImpl::ParseFloorDiv;
803 m_ParserFunctions[tflite::BuiltinOperator_FULLY_CONNECTED] = &TfLiteParserImpl::ParseFullyConnected;
804 m_ParserFunctions[tflite::BuiltinOperator_GATHER] = &TfLiteParserImpl::ParseGather;
805 m_ParserFunctions[tflite::BuiltinOperator_GELU] = &TfLiteParserImpl::ParseGelu;
806 m_ParserFunctions[tflite::BuiltinOperator_GATHER_ND] = &TfLiteParserImpl::ParseGatherNd;
807 m_ParserFunctions[tflite::BuiltinOperator_GREATER] = &TfLiteParserImpl::ParseGreater;
808 m_ParserFunctions[tflite::BuiltinOperator_GREATER_EQUAL] = &TfLiteParserImpl::ParseGreaterOrEqual;
809 m_ParserFunctions[tflite::BuiltinOperator_HARD_SWISH] = &TfLiteParserImpl::ParseHardSwish;
810 m_ParserFunctions[tflite::BuiltinOperator_LEAKY_RELU] = &TfLiteParserImpl::ParseLeakyRelu;
811 m_ParserFunctions[tflite::BuiltinOperator_LESS] = &TfLiteParserImpl::ParseLess;
812 m_ParserFunctions[tflite::BuiltinOperator_LESS_EQUAL] = &TfLiteParserImpl::ParseLessOrEqual;
813 m_ParserFunctions[tflite::BuiltinOperator_LOCAL_RESPONSE_NORMALIZATION]
814 = &TfLiteParserImpl::ParseLocalResponseNormalization;
815 m_ParserFunctions[tflite::BuiltinOperator_LOG] = &TfLiteParserImpl::ParseLog;
816 m_ParserFunctions[tflite::BuiltinOperator_LOGICAL_NOT] = &TfLiteParserImpl::ParseLogicalNot;
817 m_ParserFunctions[tflite::BuiltinOperator_LOGISTIC] = &TfLiteParserImpl::ParseLogistic;
818 m_ParserFunctions[tflite::BuiltinOperator_LOG_SOFTMAX] = &TfLiteParserImpl::ParseLogSoftmax;
819 m_ParserFunctions[tflite::BuiltinOperator_L2_NORMALIZATION] = &TfLiteParserImpl::ParseL2Normalization;
820 m_ParserFunctions[tflite::BuiltinOperator_MAX_POOL_2D] = &TfLiteParserImpl::ParseMaxPool2D;
821 m_ParserFunctions[tflite::BuiltinOperator_MAXIMUM] = &TfLiteParserImpl::ParseMaximum;
822 m_ParserFunctions[tflite::BuiltinOperator_MEAN] = &TfLiteParserImpl::ParseMean;
823 m_ParserFunctions[tflite::BuiltinOperator_MINIMUM] = &TfLiteParserImpl::ParseMinimum;
824 m_ParserFunctions[tflite::BuiltinOperator_MIRROR_PAD] = &TfLiteParserImpl::ParseMirrorPad;
825 m_ParserFunctions[tflite::BuiltinOperator_MUL] = &TfLiteParserImpl::ParseMul;
826 m_ParserFunctions[tflite::BuiltinOperator_NEG] = &TfLiteParserImpl::ParseNeg;
827 m_ParserFunctions[tflite::BuiltinOperator_NOT_EQUAL] = &TfLiteParserImpl::ParseNotEqual;
828 m_ParserFunctions[tflite::BuiltinOperator_PACK] = &TfLiteParserImpl::ParsePack;
829 m_ParserFunctions[tflite::BuiltinOperator_PAD] = &TfLiteParserImpl::ParsePad;
830 m_ParserFunctions[tflite::BuiltinOperator_PADV2] = &TfLiteParserImpl::ParsePad;
831 m_ParserFunctions[tflite::BuiltinOperator_POW] = &TfLiteParserImpl::ParsePower;
832 m_ParserFunctions[tflite::BuiltinOperator_PRELU] = &TfLiteParserImpl::ParsePrelu;
833 m_ParserFunctions[tflite::BuiltinOperator_QUANTIZE] = &TfLiteParserImpl::ParseQuantize;
834 m_ParserFunctions[tflite::BuiltinOperator_RELU] = &TfLiteParserImpl::ParseRelu;
835 m_ParserFunctions[tflite::BuiltinOperator_RELU6] = &TfLiteParserImpl::ParseRelu6;
836 m_ParserFunctions[tflite::BuiltinOperator_REDUCE_MAX] = &TfLiteParserImpl::ParseReduceMax;
837 m_ParserFunctions[tflite::BuiltinOperator_REDUCE_MIN] = &TfLiteParserImpl::ParseReduceMin;
838 m_ParserFunctions[tflite::BuiltinOperator_REDUCE_PROD] = &TfLiteParserImpl::ParseReduceProd;
839 m_ParserFunctions[tflite::BuiltinOperator_RESHAPE] = &TfLiteParserImpl::ParseReshape;
840 m_ParserFunctions[tflite::BuiltinOperator_RESIZE_BILINEAR] = &TfLiteParserImpl::ParseResizeBilinear;
841 m_ParserFunctions[tflite::BuiltinOperator_RESIZE_NEAREST_NEIGHBOR] = &TfLiteParserImpl::ParseResizeNearestNeighbor;
842 m_ParserFunctions[tflite::BuiltinOperator_REVERSE_V2] = &TfLiteParserImpl::ParseReverseV2;
843 m_ParserFunctions[tflite::BuiltinOperator_RSQRT] = &TfLiteParserImpl::ParseRsqrt;
844 m_ParserFunctions[tflite::BuiltinOperator_SCATTER_ND] = &TfLiteParserImpl::ParseScatterNd;
845 m_ParserFunctions[tflite::BuiltinOperator_SQRT] = &TfLiteParserImpl::ParseSqrt;
846 m_ParserFunctions[tflite::BuiltinOperator_SHAPE] = &TfLiteParserImpl::ParseShape;
847 m_ParserFunctions[tflite::BuiltinOperator_SIN] = &TfLiteParserImpl::ParseSin;
848 m_ParserFunctions[tflite::BuiltinOperator_SLICE] = &TfLiteParserImpl::ParseSlice;
849 m_ParserFunctions[tflite::BuiltinOperator_SOFTMAX] = &TfLiteParserImpl::ParseSoftmax;
850 m_ParserFunctions[tflite::BuiltinOperator_SPACE_TO_BATCH_ND] = &TfLiteParserImpl::ParseSpaceToBatchND;
851 m_ParserFunctions[tflite::BuiltinOperator_SPACE_TO_DEPTH] = &TfLiteParserImpl::ParseSpaceToDepth;
852 m_ParserFunctions[tflite::BuiltinOperator_SPLIT] = &TfLiteParserImpl::ParseSplit;
853 m_ParserFunctions[tflite::BuiltinOperator_SPLIT_V] = &TfLiteParserImpl::ParseSplitV;
854 m_ParserFunctions[tflite::BuiltinOperator_SQUEEZE] = &TfLiteParserImpl::ParseSqueeze;
855 m_ParserFunctions[tflite::BuiltinOperator_SQUARE] = &TfLiteParserImpl::ParseSquare;
856 m_ParserFunctions[tflite::BuiltinOperator_SQUARED_DIFFERENCE] = &TfLiteParserImpl::ParseSquaredDifference;
857 m_ParserFunctions[tflite::BuiltinOperator_STRIDED_SLICE] = &TfLiteParserImpl::ParseStridedSlice;
858 m_ParserFunctions[tflite::BuiltinOperator_SUB] = &TfLiteParserImpl::ParseSub;
859 m_ParserFunctions[tflite::BuiltinOperator_SUM] = &TfLiteParserImpl::ParseSum;
860 m_ParserFunctions[tflite::BuiltinOperator_TANH] = &TfLiteParserImpl::ParseTanH;
861 m_ParserFunctions[tflite::BuiltinOperator_TILE] = &TfLiteParserImpl::ParseTile;
862 m_ParserFunctions[tflite::BuiltinOperator_TRANSPOSE] = &TfLiteParserImpl::ParseTranspose;
863 m_ParserFunctions[tflite::BuiltinOperator_TRANSPOSE_CONV] = &TfLiteParserImpl::ParseTransposeConv;
864 m_ParserFunctions[tflite::BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_LSTM]
865 = &TfLiteParserImpl::ParseUnidirectionalSequenceLSTM;
866 m_ParserFunctions[tflite::BuiltinOperator_UNPACK] = &TfLiteParserImpl::ParseUnpack;
867
868 // register supported custom operators
869 m_CustomParserFunctions["TFLite_Detection_PostProcess"] = &TfLiteParserImpl::ParseDetectionPostProcess;
870}

References TfLiteParserImpl().

Referenced by CreateNetworkFromBinaryAsDynamic(), and TfLiteParserImpl().

◆ ~TfLiteParserImpl()

~TfLiteParserImpl ( )
default

Member Function Documentation

◆ CreateNetworkFromBinary()

INetworkPtr CreateNetworkFromBinary ( const std::vector< uint8_t > & binaryContent)

Create the network from a flatbuffers binary.

Definition at line 991 of file TfLiteParser.cpp.

992{
993 ResetParser();
994 m_Model = LoadModelFromBinary(binaryContent.data(), binaryContent.size());
995 return CreateNetworkFromModel();
996}

References LoadModelFromBinary().

◆ CreateNetworkFromBinaryAsDynamic()

armnn::INetworkPtr CreateNetworkFromBinaryAsDynamic ( const std::vector< uint8_t > & binaryContent)

References TfLiteParserImpl().

◆ CreateNetworkFromBinaryFile()

INetworkPtr CreateNetworkFromBinaryFile ( const char * graphFile)

Create the network from a flatbuffers binary file on disk.

Definition at line 984 of file TfLiteParser.cpp.

985{
986 ResetParser();
987 m_Model = LoadModelFromFile(graphFile);
988 return CreateNetworkFromModel();
989}

References LoadModelFromFile().

◆ GetBuffer()

TfLiteParserImpl::BufferRawPtr GetBuffer ( const ModelPtr & model,
size_t bufferIndex )
static

Definition at line 5848 of file TfLiteParser.cpp.

5849{
5850 CHECK_BUFFER(model, bufferIndex);
5851 return model->buffers[bufferIndex].get();
5852}
#define CHECK_BUFFER(MODEL, BUFFER_INDEX)

References CHECK_BUFFER.

◆ GetInputs()

TfLiteParserImpl::TensorRawPtrVector GetInputs ( const ModelPtr & model,
size_t subgraphIndex,
size_t operatorIndex )
static

Definition at line 5536 of file TfLiteParser.cpp.

5539{
5540 CHECK_MODEL(model, subgraphIndex, operatorIndex);
5541
5542 const auto& subgraphPtr = model->subgraphs[subgraphIndex];
5543 const auto& operatorPtr = subgraphPtr->operators[operatorIndex];
5544
5545 size_t inputCount = operatorPtr->inputs.size();
5546 TensorRawPtrVector result;
5547 for (size_t i = 0; i < inputCount; ++i)
5548 {
5549 // If the input location is -1 then assume input is turned off.
5550 if (operatorPtr->inputs[i] == -1)
5551 {
5552 continue;
5553 }
5554 else
5555 {
5556 uint32_t inputId = CHECKED_NON_NEGATIVE(operatorPtr->inputs[i]);
5557 result.push_back(subgraphPtr->tensors[inputId].get());
5558 }
5559 }
5560 return result;
5561}
#define CHECK_MODEL(MODEL, SUBGRAPH_INDEX, OPERATOR_INDEX)
#define CHECKED_NON_NEGATIVE(VALUE)
std::vector< TensorRawPtr > TensorRawPtrVector

References CHECK_MODEL, and CHECKED_NON_NEGATIVE.

◆ GetInputTensorIds()

std::vector< int32_t > & GetInputTensorIds ( const ModelPtr & model,
size_t subgraphIndex,
size_t operatorIndex )
static

Definition at line 5616 of file TfLiteParser.cpp.

5619{
5620 CHECK_MODEL(model, subgraphIndex, operatorIndex);
5621 const auto& subgraphPtr = model->subgraphs[subgraphIndex];
5622 const auto& operatorPtr = subgraphPtr->operators[operatorIndex];
5623 return operatorPtr->inputs;
5624}

References CHECK_MODEL.

◆ GetNetworkInputBindingInfo()

BindingPointInfo GetNetworkInputBindingInfo ( size_t subgraphId,
const std::string & name ) const

Retrieve binding info (layer id and tensor info) for the network input identified by the given layer name and subgraph id.

Definition at line 6026 of file TfLiteParser.cpp.

6028{
6029 CHECK_SUBGRAPH(m_Model, subgraphId);
6030 auto inputs = GetSubgraphInputs(m_Model, subgraphId);
6031 for (auto const& input : inputs)
6032 {
6033 if (input.second->name == name)
6034 {
6035 auto bindingId = GenerateLayerBindingId(subgraphId, input.first);
6036 auto inputTensorInfo = ToTensorInfo(input.second);
6037 // Input tensors are always treated as constant tensors during network execution.
6038 inputTensorInfo.SetConstant(true);
6039 return std::make_pair(bindingId, inputTensorInfo);
6040 }
6041 }
6042
6043 std::stringstream bindings;
6044 for (auto const& input : inputs)
6045 {
6046 bindings << "'" << input.second->name << "' ";
6047 }
6048
6049 throw ParseException(
6050 fmt::format("No input binding found for subgraph:{} and name:{}. "
6051 "Possible inputs are: [{}] {}",
6052 subgraphId,
6053 name,
6054 bindings.str(),
6055 CHECK_LOCATION().AsString()));
6056}
#define CHECK_LOCATION()
#define CHECK_SUBGRAPH(MODEL, SUBGRAPH_INDEX)
armnn::TensorInfo ToTensorInfo(TensorRawPtr tensorPtr)

References CHECK_LOCATION, CHECK_SUBGRAPH, GetSubgraphInputs(), and TensorInfo::SetConstant().

◆ GetNetworkOutputBindingInfo()

BindingPointInfo GetNetworkOutputBindingInfo ( size_t subgraphId,
const std::string & name ) const

Retrieve binding info (layer id and tensor info) for the network output identified by the given layer name and subgraph id.

Definition at line 6058 of file TfLiteParser.cpp.

6060{
6061 CHECK_SUBGRAPH(m_Model, subgraphId);
6062 auto outputs = GetSubgraphOutputs(m_Model, subgraphId);
6063 for (unsigned int i = 0; i < outputs.size(); ++i)
6064 {
6065 auto const output = outputs[i];
6066 if (output.second->name == name)
6067 {
6068 auto bindingId = GenerateLayerBindingId(subgraphId, output.first);
6069 std::vector<unsigned int> shape = m_OverriddenOutputShapes.size() > 0 ?
6070 m_OverriddenOutputShapes[i] : AsUnsignedVector(output.second->shape);
6071 return std::make_pair(bindingId, ToTensorInfo(output.second, shape));
6072 }
6073 }
6074
6075 std::stringstream bindings;
6076 for (auto const& output : outputs)
6077 {
6078 bindings << "'" << output.second->name << "' ";
6079 }
6080
6081 throw ParseException(
6082 fmt::format("No output binding found for subgraph:{} and name:{}. "
6083 "Possible outputs are: [{}] {}",
6084 subgraphId,
6085 name,
6086 bindings.str(),
6087 CHECK_LOCATION().AsString()));
6088}

References CHECK_LOCATION, CHECK_SUBGRAPH, and GetSubgraphOutputs().

◆ GetOutputs()

TfLiteParserImpl::TensorRawPtrVector GetOutputs ( const ModelPtr & model,
size_t subgraphIndex,
size_t operatorIndex )
static

Definition at line 5563 of file TfLiteParser.cpp.

5566{
5567 CHECK_MODEL(model, subgraphIndex, operatorIndex);
5568
5569 const auto& subgraphPtr = model->subgraphs[subgraphIndex];
5570 const auto& operatorPtr = subgraphPtr->operators[operatorIndex];
5571
5572 size_t outputCount = operatorPtr->outputs.size();
5573 TensorRawPtrVector result(outputCount);
5574 for (size_t i = 0; i < outputCount; ++i)
5575 {
5576 uint32_t outputId = CHECKED_NON_NEGATIVE(operatorPtr->outputs[i]);
5577 CHECK_TENSOR(model, subgraphIndex, outputId);
5578 result[i] = subgraphPtr->tensors[outputId].get();
5579 }
5580 return result;
5581}
#define CHECK_TENSOR(MODEL, SUBGRAPH_INDEX, TENSOR_INDEX)

References CHECK_MODEL, CHECK_TENSOR, and CHECKED_NON_NEGATIVE.

◆ GetOutputTensorIds()

std::vector< int32_t > & GetOutputTensorIds ( const ModelPtr & model,
size_t subgraphIndex,
size_t operatorIndex )
static

Definition at line 5626 of file TfLiteParser.cpp.

5629{
5630 CHECK_MODEL(model, subgraphIndex, operatorIndex);
5631 const auto& subgraphPtr = model->subgraphs[subgraphIndex];
5632 const auto& operatorPtr = subgraphPtr->operators[operatorIndex];
5633 return operatorPtr->outputs;
5634}

References CHECK_MODEL.

◆ GetSubgraphCount()

size_t GetSubgraphCount ( ) const

Return the number of subgraphs in the parsed model.

Definition at line 6090 of file TfLiteParser.cpp.

6091{
6092 return m_Model->subgraphs.size();
6093}

◆ GetSubgraphInputs()

TfLiteParserImpl::TensorIdRawPtrVector GetSubgraphInputs ( const ModelPtr & model,
size_t subgraphIndex )
static

Definition at line 5583 of file TfLiteParser.cpp.

5585{
5586 CHECK_SUBGRAPH(model, subgraphIndex);
5587 const auto& subgraphPtr = model->subgraphs[subgraphIndex];
5588
5589 size_t inputCount = subgraphPtr->inputs.size();
5590 TensorIdRawPtrVector result(inputCount);
5591 for (size_t i = 0; i < inputCount; ++i)
5592 {
5593 uint32_t inputId = CHECKED_NON_NEGATIVE(subgraphPtr->inputs[i]);
5594 CHECK_TENSOR(model, subgraphIndex, inputId);
5595 result[i] = std::make_pair(inputId, subgraphPtr->tensors[inputId].get());
5596 }
5597 return result;
5598}

References CHECK_SUBGRAPH, CHECK_TENSOR, and CHECKED_NON_NEGATIVE.

Referenced by GetNetworkInputBindingInfo(), and GetSubgraphInputTensorNames().

◆ GetSubgraphInputTensorNames()

std::vector< std::string > GetSubgraphInputTensorNames ( size_t subgraphId) const

Return the input tensor names for a given subgraph.

Definition at line 6095 of file TfLiteParser.cpp.

6096{
6097 CHECK_SUBGRAPH(m_Model, subgraphId);
6098 auto inputs = GetSubgraphInputs(m_Model, subgraphId);
6099 std::vector<std::string> result;
6100 result.reserve(inputs.size());
6101 for (auto const& input : inputs)
6102 {
6103 result.push_back(input.second->name);
6104 }
6105 return result;
6106}

References CHECK_SUBGRAPH, and GetSubgraphInputs().

◆ GetSubgraphOutputs()

TfLiteParserImpl::TensorIdRawPtrVector GetSubgraphOutputs ( const ModelPtr & model,
size_t subgraphIndex )
static

Definition at line 5600 of file TfLiteParser.cpp.

5602{
5603 CHECK_SUBGRAPH(model, subgraphIndex);
5604 const auto& subgraphPtr = model->subgraphs[subgraphIndex];
5605
5606 size_t outputCount = subgraphPtr->outputs.size();
5607 TensorIdRawPtrVector result(outputCount);
5608 for (size_t i = 0; i < outputCount; ++i)
5609 {
5610 uint32_t outputId = CHECKED_NON_NEGATIVE(subgraphPtr->outputs[i]);
5611 result[i] = std::make_pair(outputId, subgraphPtr->tensors[outputId].get());
5612 }
5613 return result;
5614}

References CHECK_SUBGRAPH, and CHECKED_NON_NEGATIVE.

Referenced by GetNetworkOutputBindingInfo(), and GetSubgraphOutputTensorNames().

◆ GetSubgraphOutputTensorNames()

std::vector< std::string > GetSubgraphOutputTensorNames ( size_t subgraphId) const

Return the output tensor names for a given subgraph.

Definition at line 6108 of file TfLiteParser.cpp.

6109{
6110 CHECK_SUBGRAPH(m_Model, subgraphId);
6111 auto outputs = GetSubgraphOutputs(m_Model, subgraphId);
6112 std::vector<std::string> result;
6113 result.reserve(outputs.size());
6114 for (auto const& output : outputs)
6115 {
6116 result.push_back(output.second->name);
6117 }
6118 return result;
6119}

References CHECK_SUBGRAPH, and GetSubgraphOutputs().

◆ GetVersion()

const std::string GetVersion ( )
static

Retrieve version in X.Y.Z form.

Definition at line 6121 of file TfLiteParser.cpp.

6122{
6123 return TFLITE_PARSER_VERSION;
6124}
#define TFLITE_PARSER_VERSION
TFLITE_PARSER_VERSION: "X.Y.Z" where: X = Major version number Y = Minor version number Z = Patch version number
Definition Version.hpp:25

References TFLITE_PARSER_VERSION.

◆ LoadModel()

armnn::INetworkPtr LoadModel ( std::unique_ptr< tflite::ModelT > model)

Definition at line 999 of file TfLiteParser.cpp.

1000{
1001 ResetParser();
1002 m_Model = std::move(model);
1003
1004 return CreateNetworkFromModel();
1005}

◆ LoadModelFromBinary()

TfLiteParserImpl::ModelPtr LoadModelFromBinary ( const uint8_t * binaryContent,
size_t len )
static

Definition at line 5517 of file TfLiteParser.cpp.

5518{
5519 if (binaryContent == nullptr)
5520 {
5521 throw InvalidArgumentException(fmt::format("Invalid (null) binary content {}",
5522 CHECK_LOCATION().AsString()));
5523 }
5524 flatbuffers::Verifier verifier(binaryContent, len);
5525 if (verifier.VerifyBuffer<tflite::Model>() == false)
5526 {
5527 throw ParseException(
5528 fmt::format("Buffer doesn't conform to the expected Tensorflow Lite "
5529 "flatbuffers format. size:{} {}",
5530 len,
5531 CHECK_LOCATION().AsString()));
5532 }
5533 return tflite::UnPackModel(binaryContent);
5534}

References CHECK_LOCATION.

Referenced by CreateNetworkFromBinary(), and LoadModelFromFile().

◆ LoadModelFromFile()

TfLiteParserImpl::ModelPtr LoadModelFromFile ( const char * fileName)
static

Definition at line 5487 of file TfLiteParser.cpp.

5488{
5489 if (fileName == nullptr)
5490 {
5491 throw InvalidArgumentException(fmt::format("Invalid (null) file name {}",
5492 CHECK_LOCATION().AsString()));
5493 }
5494 std::error_code errorCode;
5495 fs::path pathToFile(fileName);
5496 if (!fs::exists(pathToFile, errorCode))
5497 {
5498 //fmt::format() could not be used here (format error)
5499 std::stringstream msg;
5500 msg << "Cannot find the file (" << fileName << ") errorCode: " << errorCode
5501 << " " << CHECK_LOCATION().AsString();
5502 throw FileNotFoundException(msg.str());
5503 }
5504 if (!fs::is_regular_file(pathToFile))
5505 {
5506 // Exclude non regular files.
5507 throw InvalidArgumentException(fmt::format("File \"{}\" is not a regular file and cannot be loaded.",
5508 pathToFile.c_str()));
5509 }
5510
5511 std::ifstream file(fileName, std::ios::binary);
5512 std::string fileContent((std::istreambuf_iterator<char>(file)), std::istreambuf_iterator<char>());
5513 return LoadModelFromBinary(reinterpret_cast<const uint8_t *>(fileContent.c_str()),
5514 fileContent.size());
5515}

References CHECK_LOCATION, and LoadModelFromBinary().

Referenced by CreateNetworkFromBinaryFile().

◆ OutputShapeOfReshape()

armnn::TensorInfo OutputShapeOfReshape ( const armnn::TensorInfo & inputTensorInfo,
const std::vector< int32_t > & targetDimsIn )
static

Definition at line 3387 of file TfLiteParser.cpp.

3389{
3390 std::vector<unsigned int> outputDims(targetDimsIn.begin(), targetDimsIn.end());
3391 const auto stretchDim = std::find(targetDimsIn.begin(), targetDimsIn.end(), -1);
3392
3393 if (stretchDim != targetDimsIn.end())
3394 {
3395 if (std::find(std::next(stretchDim), targetDimsIn.end(), -1) != targetDimsIn.end())
3396 {
3397 throw ParseException(
3398 fmt::format("At most one component of shape can be -1 {}", CHECK_LOCATION().AsString()));
3399 }
3400
3401 auto targetNumElements =
3402 armnn::numeric_cast<unsigned int>(
3403 std::accumulate(targetDimsIn.begin(), targetDimsIn.end(), -1, std::multiplies<int32_t>()));
3404
3405 auto stretchIndex = static_cast<size_t>(std::distance(targetDimsIn.begin(), stretchDim));
3406
3407 if (targetNumElements == 0)
3408 {
3409 if (inputTensorInfo.GetNumElements() == 0)
3410 {
3411 outputDims[stretchIndex] = 0;
3412 }
3413 else
3414 {
3415 throw ParseException(
3416 fmt::format("Input to reshape is a tensor with elements, but the requested shape has 0. {}",
3417 CHECK_LOCATION().AsString()));
3418 }
3419 }
3420 else
3421 {
3422 outputDims[stretchIndex] = inputTensorInfo.GetNumElements() / targetNumElements;
3423 }
3424 }
3425
3426 TensorShape outputShape = TensorShape(static_cast<unsigned int>(outputDims.size()), outputDims.data());
3427
3428 TensorInfo reshapeInfo = inputTensorInfo;
3429 reshapeInfo.SetShape(outputShape);
3430
3431 return reshapeInfo;
3432}
unsigned int GetNumElements() const
Definition Tensor.hpp:198
void SetShape(const TensorShape &newShape)
Definition Tensor.hpp:195
std::enable_if_t< std::is_unsigned< Source >::value &&std::is_unsigned< Dest >::value, Dest > numeric_cast(Source source)

References CHECK_LOCATION, TensorInfo::GetNumElements(), armnn::numeric_cast(), and TensorInfo::SetShape().

◆ OutputShapeOfSqueeze()

armnn::TensorInfo OutputShapeOfSqueeze ( std::vector< uint32_t > squeezeDims,
const armnn::TensorInfo & inputTensorInfo )
static

Definition at line 2479 of file TfLiteParser.cpp.

2481{
2482 CHECK_VALID_SIZE(squeezeDims.size(), 0, 1, 2, 3, 4);
2483 static const uint32_t dimensionSequence[] = { 0, 1, 2, 3 };
2484
2485 if (inputTensorInfo.GetNumDimensions() > 4)
2486 {
2487 std::stringstream ss;
2488 ss << "Input tensor has unexpected number of dimensions:" << inputTensorInfo.GetNumDimensions()
2489 << " shape:" << inputTensorInfo.GetShape() << " "
2490 << CHECK_LOCATION().AsString();
2491 throw ParseException(ss.str());
2492 }
2493
2494 if (squeezeDims.empty())
2495 {
2496 squeezeDims.assign(dimensionSequence,
2497 dimensionSequence+inputTensorInfo.GetNumDimensions());
2498 }
2499
2500 std::vector<uint32_t> outputDims;
2501 for(unsigned int i = 0; i < inputTensorInfo.GetNumDimensions(); i++)
2502 {
2503 bool skipSqueeze = (std::find(squeezeDims.begin(), squeezeDims.end(), i) == squeezeDims.end());
2504 auto currentDimension = inputTensorInfo.GetShape()[i];
2505 if (skipSqueeze || currentDimension != 1)
2506 {
2507 outputDims.push_back(currentDimension);
2508 }
2509 }
2510
2511 if (outputDims.size() > 4)
2512 {
2513 std::stringstream ss;
2514 ss << "Output tensor has unexpected number of dimensions:" << inputTensorInfo.GetNumDimensions()
2515 << " shape:" << inputTensorInfo.GetShape() << " "
2516 << CHECK_LOCATION().AsString();
2517 throw ParseException(ss.str());
2518 }
2519
2520 TensorShape outShape = TensorShape(static_cast<unsigned int>(outputDims.size()),
2521 outputDims.data());
2522
2523 // we need to preserve the tensor type and the quantization data as well
2524 TensorInfo outTensorInfo = inputTensorInfo;
2525 outTensorInfo.SetShape(outShape);
2526
2527 return outTensorInfo;
2528}
#define CHECK_VALID_SIZE(ACTUAL,...)
const TensorShape & GetShape() const
Definition Tensor.hpp:193
unsigned int GetNumDimensions() const
Definition Tensor.hpp:197

References CHECK_LOCATION, CHECK_VALID_SIZE, TensorInfo::GetNumDimensions(), TensorInfo::GetShape(), and TensorInfo::SetShape().


The documentation for this class was generated from the following files: