Helper classes.
Classes
| class | ArmnnDevice |
| class | ArmnnDriver |
| class | ArmnnDriverImpl |
| class | ArmnnPreparedModel |
| struct | CanonicalExecutionContext |
| class | ConstTensorPin |
| struct | ConversionData |
| class | Converter |
| class | DriverOptions |
| class | LayerInputHandle |
| class | ModelToINetworkTransformer |
| class | UnsupportedOperand |
Typedefs
| template<typename TensorType > | |
| using | DumpElementFunction = void(*)(const TensorType &tensor, unsigned int elementIndex, std::ofstream &fileStream) |
| using | Model = ::android::nn::Model |
| Helper classes. | |
| using | Operand = ::android::nn::Operand |
| using | OperandLifeTime = ::android::nn::Operand::LifeTime |
| using | OperandType = ::android::nn::OperandType |
| using | Operation = ::android::nn::Operation |
| using | OperationType = ::android::nn::OperationType |
| using | ErrorStatus = ::android::nn::ErrorStatus |
| using | DequantizeResult = std::tuple< std::unique_ptr< float[]>, size_t, armnn::TensorInfo, DequantizeStatus > |
| using | Half = half_float::half |
Enumerations
| enum | ConversionResult { Success, ErrorMappingPools, UnsupportedFeature } |
| enum | DequantizeStatus { SUCCESS, NOT_REQUIRED, INVALID_OPERAND } |
Functions
| void | SwizzleAndroidNn4dTensorToArmNn (armnn::TensorInfo &tensor, const void *input, void *output, const armnn::PermutationVector &mappings) |
| Swizzles tensor data in input according to the dimension mappings. | |
| void * | GetMemoryFromPool (DataLocation location, const std::vector< android::nn::RunTimePoolInfo > &memPools) |
| Returns a pointer to a specific location in a pool. | |
| void * | GetMemoryFromPointer (const Request::Argument &requestArg) |
| armnn::TensorInfo | GetTensorInfoForOperand (const Operand &operand) |
| std::string | GetOperandSummary (const Operand &operand) |
| template<typename TensorType > | |
| void | DumpTensor (const std::string &dumpDir, const std::string &requestName, const std::string &tensorName, const TensorType &tensor) |
| template void | DumpTensor< armnn::ConstTensor > (const std::string &dumpDir, const std::string &requestName, const std::string &tensorName, const armnn::ConstTensor &tensor) |
| template void | DumpTensor< armnn::Tensor > (const std::string &dumpDir, const std::string &requestName, const std::string &tensorName, const armnn::Tensor &tensor) |
| void | DumpJsonProfilingIfRequired (bool gpuProfilingEnabled, const std::string &dumpDir, armnn::NetworkId networkId, const armnn::IProfiler *profiler) |
| std::string | ExportNetworkGraphToDotFile (const armnn::IOptimizedNetwork &optimizedNetwork, const std::string &dumpDir) |
| std::string | SerializeNetwork (const armnn::INetwork &network, const std::string &dumpDir, std::vector< uint8_t > &dataCacheData, bool dataCachingActive) |
| bool | IsDynamicTensor (const armnn::TensorInfo &outputInfo) |
| Checks if a tensor info represents a dynamic tensor. | |
| bool | AreDynamicTensorsSupported (void) |
| Checks for ArmNN support of dynamic tensors. | |
| bool | isQuantizedOperand (const OperandType &operandType) |
| std::string | GetModelSummary (const Model &model) |
| std::string | GetFileTimestamp () |
| void | RenameExportedFiles (const std::string &existingSerializedFileName, const std::string &existingDotFileName, const std::string &dumpDir, const armnn::NetworkId networkId) |
| void | RenameFile (const std::string &existingName, const std::string &extension, const std::string &dumpDir, const armnn::NetworkId networkId) |
| void | CommitPools (std::vector<::android::nn::RunTimePoolInfo > &memPools) |
| OutputShape | ComputeShape (const armnn::TensorInfo &info) |
| bool | IsWeightsValid (const Operation &operation, uint32_t inputIndex, const Model &model, const bool isOptional=true) |
| Utility functions. | |
| ConstTensorPin | ConvertOperandToConstTensorPin (const Operand &operand, const Model &model, const ConversionData &data, const armnn::PermutationVector &dimensionMappings, const armnn::TensorShape *overrideTensorShape, bool optional, const armnn::DataType *overrideDataType) |
| LayerInputHandle | ConvertToLayerInputHandle (const Operation &operation, uint32_t inputIndex, const Model &model, ConversionData &data, const armnn::PermutationVector &dimensionMappings, const LayerInputHandle *inputHandle) |
| bool | ConvertPaddings (const Operation &operation, const Model &model, ConversionData &data, unsigned int rank, armnn::PadDescriptor &padDescriptor) |
| bool | ConvertPooling2d (const Operation &operation, const char *operationName, armnn::PoolingAlgorithm poolType, const Model &model, ConversionData &data) |
| bool | ConvertReduce (const Operation &operation, const Model &model, ConversionData &data, armnn::ReduceOperation reduceOperation) |
| bool | ConvertToActivation (const Operation &operation, const char *operationName, const armnn::ActivationDescriptor &activationDesc, const Model &model, ConversionData &data) |
| DequantizeResult | DequantizeIfRequired (size_t operand_index, const Operation &operation, const Model &model, const ConversionData &data) |
| ConstTensorPin | DequantizeAndMakeConstTensorPin (const Operation &operation, const Model &model, const ConversionData &data, size_t operandIndex, bool optional) |
| bool | GetInputPaddingScheme (const Operation &operation, uint32_t inputIndex, PaddingScheme &outPaddingScheme, const Model &model, const ConversionData &data) |
| const void * | GetOperandValueReadOnlyAddress (const Operand &operand, const Model &model, const ConversionData &data, bool optional) |
| bool | GetTensorInt32Values (const Operand &operand, std::vector< int32_t > &outValues, const Model &model, const ConversionData &data) |
| armnn::DataLayout | OptionalDataLayout (const Operation &operation, uint32_t inputIndex, const Model &model, ConversionData &data) |
| armnn::IConnectableLayer * | ProcessActivation (const armnn::TensorInfo &tensorInfo, ActivationFn activation, armnn::IConnectableLayer *prevLayer, ConversionData &data) |
| bool | SetupAndTrackLayerOutputSlot (const Operation &operation, uint32_t operationOutputIndex, armnn::IConnectableLayer &layer, uint32_t layerOutputIndex, const Model &model, ConversionData &data, const armnn::TensorInfo *overrideOutputInfo, const std::function< void(const armnn::TensorInfo &, bool &)> &validateFunc, const ActivationFn &activationFunction, bool inferOutputShapes) |
| bool | IsConnectedToDequantize (armnn::IOutputSlot *ioutputSlot) |
| const Operand * | GetInputOperand (const Operation &operation, uint32_t inputIndex, const Model &model, bool failOnIndexOutOfBounds=true) |
| const Operand * | GetOutputOperand (const Operation &operation, uint32_t outputIndex, const Model &model) |
| bool | GetOperandType (const Operation &operation, uint32_t inputIndex, const Model &model, OperandType &type) |
| bool | IsOperandConstant (const Operand &operand) |
| ConstTensorPin | ConvertOperationInputToConstTensorPin (const Operation &operation, uint32_t inputIndex, const Model &model, const ConversionData &data, const armnn::PermutationVector &dimensionMappings=g_DontPermute, const armnn::TensorShape *overrideTensorShape=nullptr, bool optional=false) |
| template<typename OutputType > | |
| bool | GetInputScalar (const Operation &operation, uint32_t inputIndex, OperandType type, OutputType &outValue, const Model &model, const ConversionData &data, bool optional=false) |
| bool | GetInputInt32 (const Operation &operation, uint32_t inputIndex, int32_t &outValue, const Model &model, const ConversionData &data) |
| bool | GetInputFloat32 (const Operation &operation, uint32_t inputIndex, float &outValue, const Model &model, const ConversionData &data) |
| bool | GetInputActivationFunctionImpl (const Operation &operation, uint32_t inputIndex, OperandType type, ActivationFn &outActivationFunction, const Model &model, const ConversionData &data) |
| bool | GetInputActivationFunction (const Operation &operation, uint32_t inputIndex, ActivationFn &outActivationFunction, const Model &model, const ConversionData &data) |
| bool | GetInputActivationFunctionFromTensor (const Operation &operation, uint32_t inputIndex, ActivationFn &outActivationFunction, const Model &model, const ConversionData &data) |
| bool | GetOptionalInputActivation (const Operation &operation, uint32_t inputIndex, ActivationFn &activationFunction, const Model &model, const ConversionData &data) |
| template<typename ConvolutionDescriptor > | |
| bool | GetOptionalConvolutionDilationParams (const Operation &operation, uint32_t dilationXIndex, ConvolutionDescriptor &descriptor, const Model &model, const ConversionData &data) |
| bool | GetOptionalBool (const Operation &operation, uint32_t inputIndex, const Model &model, const ConversionData &data) |
| bool | SetupAndTrackLayerOutputSlot (const Operation &operation, uint32_t outputIndex, armnn::IConnectableLayer &layer, const Model &model, ConversionData &data, const armnn::TensorInfo *overrideOutputInfo=nullptr, const std::function< void(const armnn::TensorInfo &, bool &)> &validateFunc=nullptr, const ActivationFn &activationFunction=ActivationFn::kActivationNone) |
| bool | IsQSymm8 (const Operand &operand) |
Variables
| const armnn::PermutationVector | g_DontPermute {} |
Helper classes.
| using DequantizeResult = std::tuple<std::unique_ptr<float[]>, size_t, armnn::TensorInfo, DequantizeStatus> |
Definition at line 1047 of file ConversionUtils.hpp.
| using DumpElementFunction = void (*)(const TensorType& tensor, unsigned int elementIndex, std::ofstream& fileStream) |
Definition at line 206 of file CanonicalUtils.cpp.
| using ErrorStatus = ::android::nn::ErrorStatus |
Definition at line 51 of file ConversionUtils.hpp.
| using Half = half_float::half |
Definition at line 15 of file Converter.cpp.
| using Model = ::android::nn::Model |
Helper classes.
Definition at line 45 of file ConversionUtils.hpp.
| using Operand = ::android::nn::Operand |
Definition at line 46 of file ConversionUtils.hpp.
| using OperandLifeTime = ::android::nn::Operand::LifeTime |
Definition at line 47 of file ConversionUtils.hpp.
| using OperandType = ::android::nn::OperandType |
Definition at line 48 of file ConversionUtils.hpp.
| using Operation = ::android::nn::Operation |
Definition at line 49 of file ConversionUtils.hpp.
| using OperationType = ::android::nn::OperationType |
Definition at line 50 of file ConversionUtils.hpp.
| enum ConversionResult |
strong |
| Enumerator | |
|---|---|
| Success | |
| ErrorMappingPools | |
| UnsupportedFeature | |
Definition at line 127 of file ConversionUtils.hpp.
| enum DequantizeStatus |
strong |
| Enumerator | |
|---|---|
| SUCCESS | |
| NOT_REQUIRED | |
| INVALID_OPERAND | |
Definition at line 1040 of file ConversionUtils.hpp.
| bool AreDynamicTensorsSupported | ( | ) |
Checks for ArmNN support of dynamic tensors.
Definition at line 505 of file CanonicalUtils.cpp.
Referenced by ConvertPooling2d(), ConvertReduce(), and ConvertToActivation().
| void CommitPools | ( | std::vector<::android::nn::RunTimePoolInfo > & | memPools | ) |
Definition at line 615 of file CanonicalUtils.cpp.
Referenced by ArmnnPreparedModel::ExecuteGraph().
| OutputShape ComputeShape | ( | const armnn::TensorInfo & | info | ) |
inline |
Definition at line 95 of file CanonicalUtils.hpp.
References TensorShape::GetDimensionality(), TensorShape::GetNumDimensions(), and armnn::Scalar.
Referenced by ArmnnPreparedModel::execute().
| ConstTensorPin ConvertOperandToConstTensorPin | ( | const Operand & | operand, |
| const Model & | model, | ||
| const ConversionData & | data, | ||
| const armnn::PermutationVector & | dimensionMappings, | ||
| const armnn::TensorShape * | overrideTensorShape, | ||
| bool | optional, | ||
| const armnn::DataType * | overrideDataType | ||
| ) |
Definition at line 166 of file ConversionUtils.cpp.
References GetOperandValueReadOnlyAddress(), GetTensorInfoForOperand(), IsOperandConstant(), TensorInfo::SetConstant(), TensorInfo::SetDataType(), and TensorInfo::SetShape().
Referenced by ConvertOperationInputToConstTensorPin(), and ConvertToLayerInputHandle().
| ConstTensorPin ConvertOperationInputToConstTensorPin | ( | const Operation &operation, uint32_t inputIndex, const Model &model, const ConversionData &data, const armnn::PermutationVector &dimensionMappings = g_DontPermute, const armnn::TensorShape *overrideTensorShape = nullptr, bool optional = false | ) |
inline |
Definition at line 751 of file ConversionUtils.hpp.
References ConvertOperandToConstTensorPin(), and GetInputOperand().
Referenced by DequantizeAndMakeConstTensorPin().
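A minimal usage sketch (hypothetical helper; assumes the driver's ConversionUtils.hpp is on the include path and that input index 1 holds a constant operand, which is purely illustrative):

```cpp
// Sketch only: pin the constant operand at input index 1 of an operation,
// relying on the defaulted permutation (g_DontPermute). The index is illustrative.
#include "ConversionUtils.hpp"

armnn_driver::ConstTensorPin PinWeights(const armnn_driver::Operation& operation,
                                        const armnn_driver::Model& model,
                                        const armnn_driver::ConversionData& data)
{
    armnn_driver::ConstTensorPin pin =
        armnn_driver::ConvertOperationInputToConstTensorPin(operation, 1u, model, data);

    if (!pin.IsValid())
    {
        // The operand was missing or not constant; callers typically fail the conversion here.
    }
    return pin;
}
```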
| bool ConvertPaddings | ( | const Operation & | operation, |
| const Model & | model, | ||
| ConversionData & | data, | ||
| unsigned int | rank, | ||
| armnn::PadDescriptor & | padDescriptor | ||
| ) |
Definition at line 350 of file ConversionUtils.cpp.
References GetInputOperand(), TensorShape::GetNumDimensions(), TensorShape::GetNumElements(), GetTensorInt32Values(), and PadDescriptor::m_PadList.
| bool ConvertPooling2d | ( | const Operation & | operation, |
| const char * | operationName, | ||
| armnn::PoolingAlgorithm | poolType, | ||
| const Model & | model, | ||
| ConversionData & | data | ||
| ) |
Definition at line 392 of file ConversionUtils.cpp.
References AreDynamicTensorsSupported(), LayerInputHandle::Connect(), ConvertToLayerInputHandle(), armnn::Floor, FORWARD_LAYER_SUPPORT_FUNC, DataLayoutIndexed::GetHeightIndex(), GetInputActivationFunction(), GetInputPaddingScheme(), GetInputScalar(), IConnectableLayer::GetInputSlot(), GetOutputOperand(), TensorInfo::GetShape(), LayerInputHandle::GetTensorInfo(), GetTensorInfoForOperand(), DataLayoutIndexed::GetWidthIndex(), IsDynamicTensor(), LayerInputHandle::IsValid(), ConversionData::m_Backends, Pooling2dDescriptor::m_DataLayout, ConversionData::m_Network, Pooling2dDescriptor::m_OutputShapeRounding, Pooling2dDescriptor::m_PadBottom, Pooling2dDescriptor::m_PadLeft, Pooling2dDescriptor::m_PadRight, Pooling2dDescriptor::m_PadTop, Pooling2dDescriptor::m_PoolHeight, Pooling2dDescriptor::m_PoolType, Pooling2dDescriptor::m_PoolWidth, Pooling2dDescriptor::m_StrideX, Pooling2dDescriptor::m_StrideY, armnn::NHWC, OptionalDataLayout(), IConnectableLayer::SetBackendId(), and SetupAndTrackLayerOutputSlot().
| bool ConvertReduce | ( | const Operation & | operation, |
| const Model & | model, | ||
| ConversionData & | data, | ||
| armnn::ReduceOperation | reduceOperation | ||
| ) |
Definition at line 520 of file ConversionUtils.cpp.
References AreDynamicTensorsSupported(), LayerInputHandle::Connect(), ConvertToLayerInputHandle(), FORWARD_LAYER_SUPPORT_FUNC, GetInputOperand(), GetInputScalar(), IConnectableLayer::GetInputSlot(), TensorInfo::GetNumDimensions(), GetOutputOperand(), LayerInputHandle::GetTensorInfo(), GetTensorInfoForOperand(), GetTensorInt32Values(), IsDynamicTensor(), LayerInputHandle::IsValid(), ConversionData::m_Backends, ReduceDescriptor::m_KeepDims, ConversionData::m_Network, ReduceDescriptor::m_ReduceOperation, ReduceDescriptor::m_vAxis, IConnectableLayer::SetBackendId(), and SetupAndTrackLayerOutputSlot().
Referenced by Converter::ConvertOperation().
| bool ConvertToActivation | ( | const Operation & | operation, |
| const char * | operationName, | ||
| const armnn::ActivationDescriptor & | activationDesc, | ||
| const Model & | model, | ||
| ConversionData & | data | ||
| ) |
Definition at line 604 of file ConversionUtils.cpp.
References AreDynamicTensorsSupported(), LayerInputHandle::Connect(), ConvertToLayerInputHandle(), FORWARD_LAYER_SUPPORT_FUNC, IConnectableLayer::GetInputSlot(), GetOutputOperand(), LayerInputHandle::GetTensorInfo(), GetTensorInfoForOperand(), IsDynamicTensor(), LayerInputHandle::IsValid(), ConversionData::m_Backends, ConversionData::m_Network, IConnectableLayer::SetBackendId(), and SetupAndTrackLayerOutputSlot().
| LayerInputHandle ConvertToLayerInputHandle | ( | const Operation & | operation, |
| uint32_t | inputIndex, | ||
| const Model & | model, | ||
| ConversionData & | data, | ||
| const armnn::PermutationVector & | dimensionMappings, | ||
| const LayerInputHandle * | inputHandle | ||
| ) |
Definition at line 216 of file ConversionUtils.cpp.
References ConvertOperandToConstTensorPin(), FORWARD_LAYER_SUPPORT_FUNC, ConstTensorPin::GetConstTensor(), TensorInfo::GetDataType(), BaseTensor< MemoryType >::GetInfo(), GetInputOperand(), IConnectableLayer::GetOutputSlot(), GetTensorInfoForOperand(), IsDynamicTensor(), ConstTensorPin::IsValid(), ConversionData::m_Backends, ConversionData::m_DynamicInputsEncountered, ConversionData::m_Network, ConversionData::m_OutputSlotForOperand, UnsupportedOperand< OperandType >::m_type, IConnectableLayer::SetBackendId(), and IOutputSlot::SetTensorInfo().
Referenced by ConvertPooling2d(), ConvertReduce(), and ConvertToActivation().
| ConstTensorPin DequantizeAndMakeConstTensorPin | ( | const Operation & | operation, |
| const Model & | model, | ||
| const ConversionData & | data, | ||
| size_t | operandIndex, | ||
| bool | optional | ||
| ) |
Definition at line 752 of file ConversionUtils.cpp.
References ConvertOperationInputToConstTensorPin(), DequantizeIfRequired(), g_DontPermute, INVALID_OPERAND, NOT_REQUIRED, and SUCCESS.
| DequantizeResult DequantizeIfRequired | ( | size_t | operand_index, |
| const Operation & | operation, | ||
| const Model & | model, | ||
| const ConversionData & | data | ||
| ) |
Definition at line 663 of file ConversionUtils.cpp.
References armnn::Float32, GetInputOperand(), getMainModel(), GetOperandValueReadOnlyAddress(), INVALID_OPERAND, IsOperandConstant(), IsQSymm8(), NOT_REQUIRED, and SUCCESS.
Referenced by DequantizeAndMakeConstTensorPin().
| void DumpJsonProfilingIfRequired | ( | bool | gpuProfilingEnabled, |
| const std::string & | dumpDir, | ||
| armnn::NetworkId | networkId, | ||
| const armnn::IProfiler * | profiler | ||
| ) |
Definition at line 352 of file CanonicalUtils.cpp.
References IProfiler::Print().
Referenced by ArmnnPreparedModel::~ArmnnPreparedModel().
| void DumpTensor | ( | const std::string & | dumpDir, |
| const std::string & | requestName, | ||
| const std::string & | tensorName, | ||
| const TensorType & | tensor | ||
| ) |
Definition at line 219 of file CanonicalUtils.cpp.
References TensorShape::AreAllDimensionsSpecified(), armnn::Boolean, armnn::Float16, armnn::Float32, TensorShape::GetNumElements(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::Signed32.
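A minimal usage sketch (the directory, request, and tensor names below are illustrative placeholders; the driver normally derives them from its dump-dir option and the request):

```cpp
// Sketch only: write a tensor to a dump file for debugging.
#include "CanonicalUtils.hpp"
#include <armnn/Tensor.hpp>
#include <string>

void DumpInputForDebug(const armnn::ConstTensor& tensor)
{
    armnn_driver::DumpTensor(std::string("/data/local/tmp"), // dumpDir (example)
                             std::string("request0"),        // requestName (example)
                             std::string("input0"),          // tensorName (example)
                             tensor);
}
```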
| template void armnn_driver::DumpTensor< armnn::ConstTensor > | ( | const std::string & | dumpDir, |
| const std::string & | requestName, | ||
| const std::string & | tensorName, | ||
| const armnn::ConstTensor & | tensor | ||
| ) |
| template void armnn_driver::DumpTensor< armnn::Tensor > | ( | const std::string & | dumpDir, |
| const std::string & | requestName, | ||
| const std::string & | tensorName, | ||
| const armnn::Tensor & | tensor | ||
| ) |
| std::string ExportNetworkGraphToDotFile | ( | const armnn::IOptimizedNetwork & | optimizedNetwork, |
| const std::string & | dumpDir | ||
| ) |
Definition at line 392 of file CanonicalUtils.cpp.
References GetFileTimestamp(), IOptimizedNetwork::SerializeToDot(), and armnn::Success.
Referenced by ArmnnDriverImpl::PrepareArmnnModel(), and ArmnnDriverImpl::PrepareArmnnModelFromCache().
| std::string GetFileTimestamp | ( | ) |
Definition at line 560 of file CanonicalUtils.cpp.
Referenced by ExportNetworkGraphToDotFile(), and SerializeNetwork().
| bool GetInputActivationFunction | ( | const Operation &operation, uint32_t inputIndex, ActivationFn &outActivationFunction, const Model &model, const ConversionData &data | ) |
inline |
Definition at line 856 of file ConversionUtils.hpp.
References GetInputActivationFunctionImpl().
Referenced by ConvertPooling2d(), and GetOptionalInputActivation().
| bool GetInputActivationFunctionFromTensor | ( | const Operation &operation, uint32_t inputIndex, ActivationFn &outActivationFunction, const Model &model, const ConversionData &data | ) |
inline |
| bool GetInputActivationFunctionImpl | ( | const Operation &operation, uint32_t inputIndex, OperandType type, ActivationFn &outActivationFunction, const Model &model, const ConversionData &data | ) |
inline |
Definition at line 833 of file ConversionUtils.hpp.
References GetInputScalar().
Referenced by GetInputActivationFunction(), and GetInputActivationFunctionFromTensor().
| bool GetInputFloat32 | ( | const Operation &operation, uint32_t inputIndex, float &outValue, const Model &model, const ConversionData &data | ) |
inline |
| bool GetInputInt32 | ( | const Operation &operation, uint32_t inputIndex, int32_t &outValue, const Model &model, const ConversionData &data | ) |
inline |
Definition at line 815 of file ConversionUtils.hpp.
References GetInputScalar().
Referenced by GetInputPaddingScheme().
| const Operand * GetInputOperand | ( | const Operation &operation, uint32_t inputIndex, const Model &model, bool failOnIndexOutOfBounds = true | ) |
inline |
Definition at line 662 of file ConversionUtils.hpp.
References getMainModel().
Referenced by ConvertOperationInputToConstTensorPin(), ConvertPaddings(), ConvertReduce(), ConvertToLayerInputHandle(), DequantizeIfRequired(), GetInputScalar(), GetOperandType(), GetOptionalBool(), IsWeightsValid(), and OptionalDataLayout().
| bool GetInputPaddingScheme | ( | const Operation & | operation, |
| uint32_t | inputIndex, | ||
| PaddingScheme & | outPaddingScheme, | ||
| const Model & | model, | ||
| const ConversionData & | data | ||
| ) |
Definition at line 782 of file ConversionUtils.cpp.
References GetInputInt32().
Referenced by ConvertPooling2d().
| bool armnn_driver::GetInputScalar | ( | const Operation & | operation, |
| uint32_t | inputIndex, | ||
| OperandType | type, | ||
| OutputType & | outValue, | ||
| const Model & | model, | ||
| const ConversionData & | data, | ||
| bool | optional = false |
||
| ) |
Definition at line 775 of file ConversionUtils.hpp.
References GetInputOperand(), and GetOperandValueReadOnlyAddress().
Referenced by ConvertPooling2d(), ConvertReduce(), GetInputActivationFunctionImpl(), GetInputFloat32(), GetInputInt32(), and GetOptionalConvolutionDilationParams().
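For example, a conversion routine might read an int32 scalar operand through the GetInputInt32 wrapper, which forwards to GetInputScalar (hypothetical helper; the input index is illustrative and ConversionUtils.hpp is assumed to be on the include path):

```cpp
// Sketch only: read the scalar operand at input index 1 as an int32,
// the way the Convert* helpers do.
#include "ConversionUtils.hpp"
#include <cstdint>

bool ReadAxis(const armnn_driver::Operation& operation,
              const armnn_driver::Model& model,
              const armnn_driver::ConversionData& data,
              int32_t& axisOut)
{
    return armnn_driver::GetInputInt32(operation, 1u, axisOut, model, data);
}
```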
| void * GetMemoryFromPointer | ( | const Request::Argument & | requestArg | ) |
Definition at line 77 of file CanonicalUtils.cpp.
| void * GetMemoryFromPool | ( | DataLocation | location, |
| const std::vector< android::nn::RunTimePoolInfo > & | memPools | ||
| ) |
Returns a pointer to a specific location in a pool.
Definition at line 66 of file CanonicalUtils.cpp.
Referenced by GetOperandValueReadOnlyAddress().
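A sketch of how a caller might resolve an operand's backing memory (hypothetical helper; it assumes the operand's data actually lives in a request pool and that the canonical android::nn::Operand exposes its DataLocation as a `location` member):

```cpp
// Sketch only: look up an operand's data in the run-time memory pools.
#include "CanonicalUtils.hpp"
#include <vector>

const void* ReadOperandData(const armnn_driver::Operand& operand,
                            const std::vector<android::nn::RunTimePoolInfo>& memPools)
{
    // `operand.location` is assumed to be the operand's DataLocation.
    return armnn_driver::GetMemoryFromPool(operand.location, memPools);
}
```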
| std::string GetModelSummary | ( | const Model & | model | ) |
Definition at line 526 of file CanonicalUtils.cpp.
References GetOperandSummary().
Referenced by ArmnnPreparedModel::execute(), and ArmnnPreparedModel::executeFenced().
| std::string GetOperandSummary | ( | const Operand & | operand | ) |
| bool GetOperandType | ( | const Operation &operation, uint32_t inputIndex, const Model &model, OperandType &type | ) |
inline |
| const void * GetOperandValueReadOnlyAddress | ( | const Operand & | operand, |
| const Model & | model, | ||
| const ConversionData & | data, | ||
| bool | optional | ||
| ) |
Definition at line 798 of file ConversionUtils.cpp.
References GetMemoryFromPool(), and ConversionData::m_MemPools.
Referenced by ConvertOperandToConstTensorPin(), DequantizeIfRequired(), GetInputScalar(), GetOptionalBool(), GetTensorInt32Values(), and OptionalDataLayout().
| bool GetOptionalBool | ( | const Operation &operation, uint32_t inputIndex, const Model &model, const ConversionData &data | ) |
inline |
Definition at line 933 of file ConversionUtils.hpp.
References GetInputOperand(), and GetOperandValueReadOnlyAddress().
| bool armnn_driver::GetOptionalConvolutionDilationParams | ( | const Operation & | operation, |
| uint32_t | dilationXIndex, | ||
| ConvolutionDescriptor & | descriptor, | ||
| const Model & | model, | ||
| const ConversionData & | data | ||
| ) |
| bool GetOptionalInputActivation | ( | const Operation &operation, uint32_t inputIndex, ActivationFn &activationFunction, const Model &model, const ConversionData &data | ) |
inline |
| const Operand * GetOutputOperand | ( | const Operation &operation, uint32_t outputIndex, const Model &model | ) |
inline |
Definition at line 688 of file ConversionUtils.hpp.
References getMainModel().
Referenced by ConvertPooling2d(), ConvertReduce(), ConvertToActivation(), and SetupAndTrackLayerOutputSlot().
| armnn::TensorInfo GetTensorInfoForOperand | ( | const Operand & | operand | ) |
Definition at line 97 of file CanonicalUtils.cpp.
References ARMNN_FALLTHROUGH, armnn::Boolean, armnn::Float16, armnn::Float32, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, armnn::QSymmS8, armnn::Scalar, and armnn::Signed32.
Referenced by ConvertOperandToConstTensorPin(), ConvertPooling2d(), ConvertReduce(), ConvertToActivation(), ConvertToLayerInputHandle(), and SetupAndTrackLayerOutputSlot().
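A small sketch combining this with IsDynamicTensor to decide whether an operand's shape is fully specified (hypothetical helper; CanonicalUtils.hpp is assumed to be on the include path):

```cpp
// Sketch only: derive an armnn::TensorInfo from an NNAPI operand and check
// whether its shape is fully specified.
#include "CanonicalUtils.hpp"
#include <armnn/Tensor.hpp>

bool OperandHasStaticShape(const armnn_driver::Operand& operand)
{
    const armnn::TensorInfo info = armnn_driver::GetTensorInfoForOperand(operand);
    return !armnn_driver::IsDynamicTensor(info);
}
```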
| bool GetTensorInt32Values | ( | const Operand & | operand, |
| std::vector< int32_t > & | outValues, | ||
| const Model & | model, | ||
| const ConversionData & | data | ||
| ) |
Definition at line 843 of file ConversionUtils.cpp.
References GetOperandValueReadOnlyAddress().
Referenced by ConvertPaddings(), and ConvertReduce().
| bool IsConnectedToDequantize | ( | armnn::IOutputSlot * | ioutputSlot | ) |
Definition at line 1064 of file ConversionUtils.cpp.
References armnn::Constant, armnn::Dequantize, IInputSlot::GetConnection(), IConnectableLayer::GetInputSlot(), IOutputSlot::GetOwningIConnectableLayer(), and IConnectableLayer::GetType().
| bool IsDynamicTensor | ( | const armnn::TensorInfo & | tensorInfo | ) |
Checks if a tensor info represents a dynamic tensor.
Definition at line 491 of file CanonicalUtils.cpp.
References TensorShape::AreAllDimensionsSpecified(), TensorShape::GetDimensionality(), TensorInfo::GetNumDimensions(), TensorInfo::GetShape(), and armnn::NotSpecified.
Referenced by ConvertPooling2d(), ConvertReduce(), ConvertToActivation(), ConvertToLayerInputHandle(), and SetupAndTrackLayerOutputSlot().
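The Convert* helpers pair this check with AreDynamicTensorsSupported() as a guard; a simplified sketch of that pattern (hypothetical helper, not the driver's actual code):

```cpp
// Sketch only: reject dynamic output tensors unless dynamic tensors are
// supported, mirroring the guard used by the Convert* helpers.
#include "CanonicalUtils.hpp"
#include <armnn/Tensor.hpp>

bool OutputShapeIsUsable(const armnn::TensorInfo& outputInfo)
{
    if (armnn_driver::IsDynamicTensor(outputInfo) &&
        !armnn_driver::AreDynamicTensorsSupported())
    {
        return false; // dynamic output shapes cannot be handled
    }
    return true;
}
```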
| bool IsOperandConstant | ( | const Operand & | operand | ) |
inline |
Definition at line 731 of file ConversionUtils.hpp.
Referenced by ConvertOperandToConstTensorPin(), and DequantizeIfRequired().
| bool IsQSymm8 | ( | const Operand & | operand | ) |
inline |
Definition at line 1035 of file ConversionUtils.hpp.
Referenced by DequantizeIfRequired().
| bool isQuantizedOperand | ( | const OperandType & | operandType | ) |
Definition at line 510 of file CanonicalUtils.cpp.
| bool IsWeightsValid | ( | const Operation & | operation, |
| uint32_t | inputIndex, | ||
| const Model & | model, | ||
| const bool | isOptional = true |
||
| ) |
Utility functions.
Definition at line 141 of file ConversionUtils.cpp.
References GetInputOperand().
| armnn::DataLayout OptionalDataLayout | ( | const Operation & | operation, |
| uint32_t | inputIndex, | ||
| const Model & | model, | ||
| ConversionData & | data | ||
| ) |
Definition at line 874 of file ConversionUtils.cpp.
References GetInputOperand(), GetOperandValueReadOnlyAddress(), armnn::NCHW, and armnn::NHWC.
Referenced by ConvertPooling2d().
| armnn::IConnectableLayer * ProcessActivation | ( | const armnn::TensorInfo & | tensorInfo, |
| ActivationFn | activation, | ||
| armnn::IConnectableLayer * | prevLayer, | ||
| ConversionData & | data | ||
| ) |
Definition at line 906 of file ConversionUtils.cpp.
References armnn::BoundedReLu, IOutputSlot::Connect(), FORWARD_LAYER_SUPPORT_FUNC, IConnectableLayer::GetInputSlot(), IConnectableLayer::GetNumOutputSlots(), IConnectableLayer::GetOutputSlot(), IOutputSlot::GetTensorInfo(), ActivationDescriptor::m_A, ActivationDescriptor::m_B, ConversionData::m_Backends, ActivationDescriptor::m_Function, ConversionData::m_Network, armnn::ReLu, IConnectableLayer::SetBackendId(), IOutputSlot::SetTensorInfo(), armnn::Sigmoid, and armnn::TanH.
Referenced by SetupAndTrackLayerOutputSlot().
| void RenameExportedFiles | ( | const std::string & | existingSerializedFileName, |
| const std::string & | existingDotFileName, | ||
| const std::string & | dumpDir, | ||
| const armnn::NetworkId | networkId | ||
| ) |
Definition at line 580 of file CanonicalUtils.cpp.
References RenameFile().
Referenced by ArmnnDriverImpl::PrepareArmnnModel().
| void RenameFile | ( | const std::string & | existingName, |
| const std::string & | extension, | ||
| const std::string & | dumpDir, | ||
| const armnn::NetworkId | networkId | ||
| ) |
| std::string SerializeNetwork | ( | const armnn::INetwork & | network, |
| const std::string & | dumpDir, | ||
| std::vector< uint8_t > & | dataCacheData, | ||
| bool | dataCachingActive | ||
| ) |
Definition at line 432 of file CanonicalUtils.cpp.
References ISerializer::Create(), and GetFileTimestamp().
Referenced by ArmnnDriverImpl::PrepareArmnnModel().
| bool SetupAndTrackLayerOutputSlot | ( | const Operation & | operation, |
| uint32_t | operationOutputIndex, | ||
| armnn::IConnectableLayer & | layer, | ||
| uint32_t | layerOutputIndex, | ||
| const Model & | model, | ||
| ConversionData & | data, | ||
| const armnn::TensorInfo * | overrideOutputInfo, | ||
| const std::function< void(const armnn::TensorInfo &, bool &)> & | validateFunc, | ||
| const ActivationFn & | activationFunction, | ||
| bool | inferOutputShapes | ||
| ) |
Definition at line 987 of file ConversionUtils.cpp.
References IOutputSlot::Disconnect(), IInputSlot::GetConnection(), IConnectableLayer::GetInputSlot(), IConnectableLayer::GetNumInputSlots(), IConnectableLayer::GetNumOutputSlots(), GetOutputOperand(), IConnectableLayer::GetOutputSlot(), IOutputSlot::GetTensorInfo(), GetTensorInfoForOperand(), IsDynamicTensor(), IOutputSlot::IsTensorInfoSet(), ConversionData::m_OutputSlotForOperand, ProcessActivation(), and IOutputSlot::SetTensorInfo().
Referenced by ConvertPooling2d(), ConvertReduce(), ConvertToActivation(), and SetupAndTrackLayerOutputSlot().
| bool SetupAndTrackLayerOutputSlot | ( | const Operation &operation, uint32_t outputIndex, armnn::IConnectableLayer &layer, const Model &model, ConversionData &data, const armnn::TensorInfo *overrideOutputInfo = nullptr, const std::function< void(const armnn::TensorInfo &, bool &)> &validateFunc = nullptr, const ActivationFn &activationFunction = ActivationFn::kActivationNone | ) |
inline |
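For this overload, the typical final step of a conversion routine is to register the new layer's output slot for the operation's output operand, relying on the defaulted arguments; a minimal sketch (hypothetical helper, assuming ConversionUtils.hpp is on the include path):

```cpp
// Sketch only: register `layer`'s output slot as the producer of the
// operation's output operand 0, using the defaulted arguments.
#include "ConversionUtils.hpp"
#include <armnn/INetwork.hpp>

bool FinishConversion(const armnn_driver::Operation& operation,
                      armnn::IConnectableLayer& layer,
                      const armnn_driver::Model& model,
                      armnn_driver::ConversionData& data)
{
    return armnn_driver::SetupAndTrackLayerOutputSlot(operation, 0u, layer, model, data);
}
```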
| void SwizzleAndroidNn4dTensorToArmNn | ( | armnn::TensorInfo & | tensorInfo, |
| const void * | input, | ||
| void * | output, | ||
| const armnn::PermutationVector & | mappings | ||
| ) |
Swizzles tensor data in input according to the dimension mappings.
Definition at line 40 of file CanonicalUtils.cpp.
References armnn::Float16, armnn::Float32, TensorInfo::GetDataType(), armnn::GetDataTypeSize(), TensorInfo::GetNumDimensions(), TensorInfo::GetShape(), armnnUtils::Permute(), armnnUtils::Permuted(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS8.
Referenced by ConstTensorPin::ConstTensorPin().
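A minimal caller sketch (the shape, buffers, and dimension mapping below are illustrative; the driver passes the mapping appropriate to the operand being converted, and CanonicalUtils.hpp is assumed to be on the include path):

```cpp
// Sketch only: permute a small 4-D float tensor using an example dimension mapping.
#include "CanonicalUtils.hpp"
#include <armnn/Tensor.hpp>
#include <vector>

void SwizzleExample()
{
    armnn::TensorInfo info(armnn::TensorShape({1, 2, 2, 3}), armnn::DataType::Float32);
    std::vector<float> input(info.GetNumElements(), 0.0f);   // source data
    std::vector<float> output(info.GetNumElements(), 0.0f);  // destination buffer

    const armnn::PermutationVector mappings({0, 3, 1, 2});   // example mapping
    armnn_driver::SwizzleAndroidNn4dTensorToArmNn(info, input.data(), output.data(), mappings);
}
```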
| const armnn::PermutationVector g_DontPermute {} |
Definition at line 38 of file CanonicalUtils.cpp.
Referenced by DequantizeAndMakeConstTensorPin().