ArmNN
 25.11
TensorInfo Class Reference

#include <Tensor.hpp>

Public Types

using DifferenceType = std::vector<TensorInfo>::difference_type

Public Member Functions

 TensorInfo ()
 Empty (invalid) constructor.
 TensorInfo (const TensorShape &shape, DataType dataType, float quantizationScale=1.0f, int32_t quantizationOffset=0, bool isConstant=false)
 TensorInfo (unsigned int numDimensions, const unsigned int *dimensionSizes, DataType dataType, float quantizationScale=1.0f, int32_t quantizationOffset=0, bool isConstant=false)
 TensorInfo (const TensorShape &shape, DataType dataType, const std::vector< float > &quantizationScales, unsigned int quantizationDim, bool isConstant=false)
 TensorInfo (unsigned int numDimensions, const unsigned int *dimensionSizes, DataType dataType, const std::vector< float > &quantizationScales, unsigned int quantizationDim, bool isConstant=false)
 TensorInfo (const TensorInfo &other)
TensorInfo & operator= (const TensorInfo &other)
bool operator== (const TensorInfo &other) const
bool operator!= (const TensorInfo &other) const
const TensorShape & GetShape () const
TensorShape & GetShape ()
void SetShape (const TensorShape &newShape)
unsigned int GetNumDimensions () const
unsigned int GetNumElements () const
DataType GetDataType () const
void SetDataType (DataType type)
bool HasMultipleQuantizationScales () const
bool HasPerAxisQuantization () const
std::vector< float > GetQuantizationScales () const
void SetQuantizationScales (const std::vector< float > &scales)
float GetQuantizationScale () const
void SetQuantizationScale (float scale)
int32_t GetQuantizationOffset () const
void SetQuantizationOffset (int32_t offset)
Optional< unsigned int > GetQuantizationDim () const
void SetQuantizationDim (const Optional< unsigned int > &quantizationDim)
bool IsQuantized () const
bool IsConstant () const
void SetConstant (const bool IsConstant=true)
 Marks the data corresponding to this tensor info as constant.
bool IsTypeSpaceMatch (const TensorInfo &other) const
 Check that the types are the same and, if quantized, that the quantization parameters are the same.
unsigned int GetNumBytes () const

Detailed Description
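
A minimal usage sketch (not part of the generated documentation) showing how a TensorInfo is typically constructed and queried; the shape and data type below are illustrative only:

#include <armnn/Tensor.hpp>
#include <iostream>

int main()
{
    // Describe a 1x3x224x224 float tensor (illustrative values).
    armnn::TensorInfo floatInfo(armnn::TensorShape({1, 3, 224, 224}),
                                armnn::DataType::Float32);

    std::cout << "Dimensions: " << floatInfo.GetNumDimensions() << "\n"; // 4
    std::cout << "Elements:   " << floatInfo.GetNumElements()   << "\n"; // 150528
    std::cout << "Bytes:      " << floatInfo.GetNumBytes()      << "\n"; // 602112
    return 0;
}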

Member Typedef Documentation

◆ DifferenceType

using DifferenceType = std::vector<TensorInfo>::difference_type

Definition at line 191 of file Tensor.hpp.

Constructor & Destructor Documentation

◆ TensorInfo() [1/6]

TensorInfo ( )

Empty (invalid) constructor.

Definition at line 341 of file Tensor.cpp.

: m_DataType(DataType::Float32), m_IsConstant(false)
{
}

References armnn::Float32.

Referenced by IsTypeSpaceMatch(), operator!=(), operator=(), operator==(), and TensorInfo().

◆ TensorInfo() [2/6]

TensorInfo ( const TensorShape & shape,
DataType dataType,
float quantizationScale = 1.0f,
int32_t quantizationOffset = 0,
bool isConstant = false )

Definition at line 346 of file Tensor.cpp.

: m_Shape(shape)
, m_DataType(dataType)
, m_IsConstant(isConstant)
{
    SetQuantizationScale(quantizationScale);
    SetQuantizationOffset(quantizationOffset);
}

References SetQuantizationOffset(), and SetQuantizationScale().
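
As a hedged illustration of this overload, the following sketch constructs a per-tensor quantized tensor description; the scale and offset values are placeholders:

#include <armnn/Tensor.hpp>

// 8-bit asymmetric tensor with a single (per-tensor) scale and zero point.
armnn::TensorInfo qInfo(armnn::TensorShape({1, 16}),
                        armnn::DataType::QAsymmU8,
                        0.05f,   // quantizationScale (illustrative)
                        128,     // quantizationOffset (illustrative)
                        false);  // isConstant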

◆ TensorInfo() [3/6]

TensorInfo ( unsigned int numDimensions,
const unsigned int * dimensionSizes,
DataType dataType,
float quantizationScale = 1.0f,
int32_t quantizationOffset = 0,
bool isConstant = false )

Definition at line 359 of file Tensor.cpp.

: m_Shape(numDimensions, dimensionSizes), m_DataType(dataType), m_IsConstant(isConstant)
{
    SetQuantizationScale(quantizationScale);
    SetQuantizationOffset(quantizationOffset);
}

References SetQuantizationOffset(), and SetQuantizationScale().

◆ TensorInfo() [4/6]

TensorInfo ( const TensorShape & shape,
DataType dataType,
const std::vector< float > & quantizationScales,
unsigned int quantizationDim,
bool isConstant = false )

Definition at line 371 of file Tensor.cpp.

: m_Shape(shape)
, m_DataType(dataType)
, m_IsConstant(isConstant)
{
    SetQuantizationScales(quantizationScales);
    SetQuantizationDim(MakeOptional<unsigned int>(quantizationDim));
}

References armnn::MakeOptional(), SetQuantizationDim(), and SetQuantizationScales().
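
A sketch of this per-axis overload, assuming symmetric 8-bit weights quantized along the output-channel dimension; the scales and dimension index are illustrative:

#include <armnn/Tensor.hpp>
#include <vector>

// One scale per output channel (axis 0), as used for per-axis quantized weights.
std::vector<float> channelScales = {0.02f, 0.04f, 0.03f, 0.05f};

armnn::TensorInfo weightInfo(armnn::TensorShape({4, 3, 3, 3}),
                             armnn::DataType::QSymmS8,
                             channelScales,
                             0,      // quantizationDim: axis the scales apply to
                             true);  // weights are typically constant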

◆ TensorInfo() [5/6]

TensorInfo ( unsigned int numDimensions,
const unsigned int * dimensionSizes,
DataType dataType,
const std::vector< float > & quantizationScales,
unsigned int quantizationDim,
bool isConstant = false )

Definition at line 384 of file Tensor.cpp.

: m_Shape(numDimensions, dimensionSizes)
, m_DataType(dataType)
, m_IsConstant(isConstant)
{
    SetQuantizationScales(quantizationScales);
    SetQuantizationDim(MakeOptional<unsigned int>(quantizationDim));
}

References armnn::MakeOptional(), SetQuantizationDim(), and SetQuantizationScales().

◆ TensorInfo() [6/6]

TensorInfo ( const TensorInfo & other)

Definition at line 398 of file Tensor.cpp.

: m_Shape(other.m_Shape)
, m_DataType(other.m_DataType)
, m_IsConstant(other.m_IsConstant)
, m_Quantization(other.m_Quantization)
{}

References TensorInfo().

Member Function Documentation

◆ GetDataType()

DataType GetDataType ( ) const
inline

Definition at line 200 of file Tensor.hpp.

{ return m_DataType; }

Referenced by BiasAndWeightsTypesMatch::BiasAndWeightsTypesMatch(), armnnUtils::CalculateReducedOutputTensoInfo(), armnnUtils::CalculateStridedSliceOutputTensorInfo(), armnn::ClCastValidate(), armnn::ClConvertFp16ToFp32WorkloadValidate(), armnn::ClConvertFp32ToFp16WorkloadValidate(), armnn::ClMultiplicationWorkloadValidate(), armnn::ClTileWorkloadValidate(), armnn::ClUnidirectionalSequenceLstmFloatWorkloadValidate(), ConvertQuantizeToTosaOperator(), ConvertSoftmaxToTosaOperator(), RefGatherWorkload::Execute(), RefTileWorkload::Execute(), Layer::GetDataType(), armnn::GetLayerInOutDatatype(), armnn::optimizations::pad_fold::GetLowestElement(), armnn::GetUnpaddedTensorStrides(), armnn::InitializeArmComputeClTensorData(), armnn::InitializeArmComputeTensorData(), armnn::InitializeArmComputeTensorData(), armnn::InsertConvertFp16ToFp32LayersBefore(), armnn::InsertConvertFp32ToFp16LayersAfter(), SampleDynamicLayerSupport::IsAdditionSupported(), RefLayerSupport::IsConvertFp16ToFp32Supported(), RefLayerSupport::IsConvertFp32ToFp16Supported(), RefLayerSupport::IsConvolution2dSupported(), RefLayerSupport::IsConvolution3dSupported(), RefLayerSupport::IsDepthwiseConvolutionSupported(), NeonLayerSupport::IsFloorSupported(), armnn::IsLayerSequence(), ClLayerSupport::IsQLstmSupported(), NeonLayerSupport::IsQLstmSupported(), RefLayerSupport::IsTransposeConvolution2dSupported(), NeonLayerSupport::IsUnidirectionalSequenceLstmSupported(), armnn::LstmImpl(), armnn::NeonCastValidate(), armnn::NeonMultiplicationWorkloadValidate(), armnn::NeonTileWorkloadValidate(), armnn::NeonUnidirectionalSequenceLstmFloatWorkloadValidate(), armnn::NeonUnidirectionalSequenceLstmWorkloadValidate(), armnn::Pad(), armnn::PermuteTensor(), ConvertFp32NetworkToFp16Impl::Run(), armnnUtils::ToFloatArray(), ArgMinMaxQueueDescriptor::Validate(), ComparisonQueueDescriptor::Validate(), ConvertFp16ToFp32QueueDescriptor::Validate(), ConvertFp32ToFp16QueueDescriptor::Validate(), Convolution2dQueueDescriptor::Validate(), Convolution3dQueueDescriptor::Validate(), DepthwiseConvolution2dQueueDescriptor::Validate(), EqualQueueDescriptor::Validate(), FullyConnectedQueueDescriptor::Validate(), GatherNdQueueDescriptor::Validate(), GatherQueueDescriptor::Validate(), GreaterQueueDescriptor::Validate(), LogicalBinaryQueueDescriptor::Validate(), QuantizeQueueDescriptor::Validate(), TransposeConvolution2dQueueDescriptor::Validate(), ConvertConstDequantisationLayersToConstLayersImpl::~ConvertConstDequantisationLayersToConstLayersImpl(), ConvertConstPermuteLayersToConstLayers::~ConvertConstPermuteLayersToConstLayers(), and TurboConvertConstDequantisationLayersToConstLayersImpl::~TurboConvertConstDequantisationLayersToConstLayersImpl().

◆ GetNumBytes()

unsigned int GetNumBytes ( ) const

Definition at line 427 of file Tensor.cpp.

{
    return GetDataTypeSize(m_DataType) * GetNumElements();
}

References armnn::GetDataTypeSize(), and GetNumElements().

Referenced by armnnOnnxParser::CreateConstTensorImpl(), armnn::PermuteTensor(), armnn::ReorderWeightChannelsForAcl(), and ScopedTensorHandle::ScopedTensorHandle().
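
A small sketch (illustrative, not from the generated docs) of GetNumBytes() driving a host-side buffer allocation:

#include <armnn/Tensor.hpp>
#include <cstdint>
#include <vector>

// Size a host buffer from the tensor description.
armnn::TensorInfo info(armnn::TensorShape({2, 3, 4}), armnn::DataType::Float32);
std::vector<uint8_t> buffer(info.GetNumBytes());   // 2*3*4 * sizeof(float) = 96 bytes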

◆ GetNumDimensions()

unsigned int GetNumDimensions ( ) const
inline

Definition at line 197 of file Tensor.hpp.

{ return m_Shape.GetNumDimensions(); }

Referenced by armnn::ArgMinMax(), armnn::BatchToSpaceNd(), armnn::BuildAddMulAddTensorInfoLists(), armnn::CalculateGatherNdKeyIndices(), armnnUtils::CalculateReducedOutputTensoInfo(), armnnUtils::CalculateStridedSliceOutputTensorInfo(), armnn::ClArgMinMaxWorkloadValidate(), armnn::ClBatchToSpaceNdWorkloadValidate(), armnn::ClChannelShuffleValidate(), ClGatherNdWorkload::ClGatherNdWorkload(), armnn::ClGatherNdWorkloadValidate(), armnn::ClMeanValidate(), armnn::ClReduceWorkloadValidate(), armnn::ClSpaceToBatchNdWorkloadValidate(), armnn::ClSplitterWorkloadValidate(), armnn::ClStridedSliceWorkloadValidate(), armnn::CollapseLeadingUnitDimensions(), armnn::ComputeAclAxis(), armnn::ComputeReductionTensorShape(), armnn::ComputeSoftmaxAclAxis(), armnn::Concatenate(), armnn::ConnectedToLayerType(), ConvertSoftmaxToTosaOperator(), armnn::Gather(), armnn::GetNumActivations(), RefLayerSupport::IsMeanSupported(), armnn::LogSoftmax(), armnn::NeonArgMinMaxWorkloadValidate(), armnn::NeonBatchToSpaceNdWorkloadValidate(), armnn::NeonChannelShuffleValidate(), NeonGatherNdWorkload::NeonGatherNdWorkload(), armnn::NeonGatherNdWorkloadValidate(), armnn::NeonMeanWorkloadValidate(), armnn::NeonReduceWorkloadValidate(), armnn::NeonSpaceToBatchNdWorkloadValidate(), armnn::NeonSplitterWorkloadValidate(), armnn::NeonStridedSliceWorkloadValidate(), TfLiteParserImpl::OutputShapeOfSqueeze(), armnn::PrintOutput(), armnnUtils::ProcessConcatInputTensorInfo(), armnn::Reduce(), armnn::ReverseV2(), AddBroadcastReshapeLayerImpl::Run(), PermuteAndBatchToSpaceAsDepthToSpaceImpl< PermuteType >::Run(), armnn::ScatterNd(), armnn::ScatterNd(), armnn::Softmax(), armnn::SpaceToBatchNd(), armnn::Split(), armnn::Splitter(), armnn::Stack(), armnn::Tile(), BatchMatMulQueueDescriptor::Validate(), FullyConnectedQueueDescriptor::Validate(), GatherNdQueueDescriptor::Validate(), GatherQueueDescriptor::Validate(), InstanceNormalizationQueueDescriptor::Validate(), L2NormalizationQueueDescriptor::Validate(), MeanQueueDescriptor::Validate(), PadQueueDescriptor::Validate(), ReverseV2QueueDescriptor::Validate(), SliceQueueDescriptor::Validate(), SpaceToBatchNdQueueDescriptor::Validate(), StridedSliceQueueDescriptor::Validate(), TileQueueDescriptor::Validate(), QueueDescriptor::ValidateTensorNumDimensions(), ReduceLayer::ValidateTensorShapesFromInputs(), and ScatterNdLayer::ValidateTensorShapesFromInputs().
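
As a quick illustration, the dimension count pairs naturally with GetShape() when walking a tensor's dimensions (a sketch with an arbitrary shape):

#include <armnn/Tensor.hpp>
#include <iostream>

void PrintDimensions(const armnn::TensorInfo& info)
{
    // Print each dimension of the shape, e.g. 1, 32, 32, 3 for an NHWC image.
    for (unsigned int i = 0; i < info.GetNumDimensions(); ++i)
    {
        std::cout << "dim " << i << " = " << info.GetShape()[i] << "\n";
    }
}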

◆ GetNumElements()

◆ GetQuantizationDim()

Optional< unsigned int > GetQuantizationDim ( ) const

Definition at line 498 of file Tensor.cpp.

{
    return m_Quantization.m_QuantizationDim;
}

Referenced by armnnUtils::ToFloatArray().

◆ GetQuantizationOffset()

◆ GetQuantizationScale()

float GetQuantizationScale ( ) const

Definition at line 461 of file Tensor.cpp.

{
    if (m_Quantization.m_Scales.empty())
    {
        // NOTE: old default for backward compatibility
        return 1.0f;
    }

    if (HasMultipleQuantizationScales())
    {
        throw RuntimeException("Invalid call to GetQuantizationScale on a tensor with multiple scale values. Use "
                               "GetQuantizationScales instead.");
    }
    return m_Quantization.m_Scales[0];
}

References HasMultipleQuantizationScales().

Referenced by ConvertQuantizeToTosaOperator(), ConvertSoftmaxToTosaOperator(), armnn::optimizations::pad_fold::GetLowestElement(), armnn::IsMultiAxesReduceSupported(), IsTypeSpaceMatch(), QuantizationParametersAreEqual::QuantizationParametersAreEqual(), armnnUtils::ToFloatArray(), QLstmQueueDescriptor::Validate(), and ConvertConstDequantisationLayersToConstLayersImpl::~ConvertConstDequantisationLayersToConstLayersImpl().
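
A minimal sketch of how the per-tensor scale and offset are typically applied when dequantizing a single value (the affine mapping real = scale * (q - offset)); the helper and values are illustrative:

#include <armnn/Tensor.hpp>
#include <cstdint>

// Dequantize one 8-bit value using the tensor's per-tensor parameters.
// Note: GetQuantizationScale() throws if the tensor has multiple per-axis scales.
float Dequantize(const armnn::TensorInfo& info, uint8_t quantizedValue)
{
    const float   scale  = info.GetQuantizationScale();
    const int32_t offset = info.GetQuantizationOffset();
    return scale * (static_cast<int32_t>(quantizedValue) - offset);
}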

◆ GetQuantizationScales()

std::vector< float > GetQuantizationScales ( ) const

Definition at line 451 of file Tensor.cpp.

{
    return m_Quantization.m_Scales;
}

Referenced by armnn::IsMultiAxesReduceSupported(), and armnnUtils::ToFloatArray().

◆ GetShape() [1/2]

TensorShape & GetShape ( )
inline

Definition at line 194 of file Tensor.hpp.

{ return m_Shape; }

◆ GetShape() [2/2]

const TensorShape & GetShape ( ) const
inline

Definition at line 193 of file Tensor.hpp.

{ return m_Shape; }

Referenced by armnn::ArgMinMax(), armnn::BatchNormImpl(), armnn::BatchToSpaceNd(), armnn::CalculateGatherNdKeyIndices(), armnnUtils::CalculateReducedOutputTensoInfo(), armnnUtils::CalculateStridedSliceOutputTensorInfo(), ClGatherNdWorkload::ClGatherNdWorkload(), ClLstmFloatWorkload::ClLstmFloatWorkload(), armnn::ClUnidirectionalSequenceLstmFloatWorkloadValidate(), armnn::CollapseLeadingUnitDimensions(), armnn::Concatenate(), armnn::Convert1HWOTensorInfoToAcl(), armnn::Convert1HWOTensorToAcl(), armnn::Convert1HWOtoMIHW(), ConvertConstantToTosaOperator(), ConvertQuantizeToTosaOperator(), ConvertSoftmaxToTosaOperator(), armnn::ConvertWeightTensorFromArmnnToAcl(), armnn::CreateAclNormalizationLayerInfoForL2Normalization(), armnnOnnxParser::CreateConstTensorImpl(), armnn::DepthToSpace(), armnn::DetectionPostProcess(), armnn::ExecuteFunction(), armnn::Gather(), armnn::GetNumActivations(), armnn::GetUnpaddedTensorStrides(), armnn::InstanceNorm(), armnn::IsMultiAxesReduceSupported(), ClLayerSupport::IsSplitterSupported(), NeonLayerSupport::IsSplitterSupported(), armnn::LogSoftmax(), armnn::LstmImpl(), armnn::MirrorPad(), NeonGatherNdWorkload::NeonGatherNdWorkload(), NeonLstmFloatWorkload::NeonLstmFloatWorkload(), armnn::NeonUnidirectionalSequenceLstmFloatWorkloadValidate(), armnn::NeonUnidirectionalSequenceLstmWorkloadValidate(), TfLiteParserImpl::OutputShapeOfSqueeze(), armnn::Pad(), armnn::PermuteTensor(), armnn::Pooling2d(), armnn::Pooling3d(), armnn::PreluImpl(), armnn::PrintOutput(), armnnUtils::ProcessConcatInputTensorInfo(), armnn::Reduce(), armnnUtils::ReduceDims(), RefTransposeConvolution2dWorkload::RefTransposeConvolution2dWorkload(), armnn::ReshapeWeightsForAcl(), armnn::Resize(), armnn::ReverseV2(), AddBroadcastReshapeLayerImpl::Run(), FuseBatchNorm< ConvLayer, ArmnnType, T >::Run(), OptimizeConsecutiveReshapesImpl::Run(), PermuteAsReshapeImpl::Run(), TransposeAsReshapeImpl::Run(), armnn::ScatterNd(), armnn::ScatterNd(), Convolution2dLayer::SerializeLayerParameters(), Convolution3dLayer::SerializeLayerParameters(), DepthwiseConvolution2dLayer::SerializeLayerParameters(), Graph::SerializeToDot(), ShapesAreBroadcastCompatible::ShapesAreBroadcastCompatible(), ShapesAreSameRank::ShapesAreSameRank(), armnn::Slice(), armnn::Softmax(), armnn::SpaceToBatchNd(), armnn::SpaceToDepth(), armnn::Split(), armnn::Splitter(), armnn::Stack(), armnn::StridedSlice(), armnn::Tile(), armnnUtils::ToFloatArray(), ArgMinMaxQueueDescriptor::Validate(), BatchMatMulQueueDescriptor::Validate(), BatchToSpaceNdQueueDescriptor::Validate(), DepthToSpaceQueueDescriptor::Validate(), DepthwiseConvolution2dQueueDescriptor::Validate(), DetectionPostProcessQueueDescriptor::Validate(), PermuteQueueDescriptor::Validate(), QLstmQueueDescriptor::Validate(), QuantizedLstmQueueDescriptor::Validate(), ResizeQueueDescriptor::Validate(), SliceQueueDescriptor::Validate(), SpaceToBatchNdQueueDescriptor::Validate(), SpaceToDepthQueueDescriptor::Validate(), TransposeQueueDescriptor::Validate(), QueueDescriptor::ValidateTensorNumDimensions(), AbsLayer::ValidateTensorShapesFromInputs(), ActivationLayer::ValidateTensorShapesFromInputs(), ArgMinMaxLayer::ValidateTensorShapesFromInputs(), BatchMatMulLayer::ValidateTensorShapesFromInputs(), BatchNormalizationLayer::ValidateTensorShapesFromInputs(), BatchToSpaceNdLayer::ValidateTensorShapesFromInputs(), BroadcastToLayer::ValidateTensorShapesFromInputs(), CastLayer::ValidateTensorShapesFromInputs(), ChannelShuffleLayer::ValidateTensorShapesFromInputs(), 
ComparisonLayer::ValidateTensorShapesFromInputs(), ConcatLayer::ValidateTensorShapesFromInputs(), ConvertFp16ToFp32Layer::ValidateTensorShapesFromInputs(), ConvertFp32ToFp16Layer::ValidateTensorShapesFromInputs(), Convolution2dLayer::ValidateTensorShapesFromInputs(), Convolution3dLayer::ValidateTensorShapesFromInputs(), DebugLayer::ValidateTensorShapesFromInputs(), DepthToSpaceLayer::ValidateTensorShapesFromInputs(), DepthwiseConvolution2dLayer::ValidateTensorShapesFromInputs(), DequantizeLayer::ValidateTensorShapesFromInputs(), DetectionPostProcessLayer::ValidateTensorShapesFromInputs(), ElementwiseBaseLayer::ValidateTensorShapesFromInputs(), ElementwiseBinaryLayer::ValidateTensorShapesFromInputs(), ElementwiseUnaryLayer::ValidateTensorShapesFromInputs(), FakeQuantizationLayer::ValidateTensorShapesFromInputs(), FillLayer::ValidateTensorShapesFromInputs(), FloorLayer::ValidateTensorShapesFromInputs(), FullyConnectedLayer::ValidateTensorShapesFromInputs(), GatherLayer::ValidateTensorShapesFromInputs(), GatherNdLayer::ValidateTensorShapesFromInputs(), InstanceNormalizationLayer::ValidateTensorShapesFromInputs(), L2NormalizationLayer::ValidateTensorShapesFromInputs(), LogicalBinaryLayer::ValidateTensorShapesFromInputs(), LogSoftmaxLayer::ValidateTensorShapesFromInputs(), LstmLayer::ValidateTensorShapesFromInputs(), MeanLayer::ValidateTensorShapesFromInputs(), MemCopyLayer::ValidateTensorShapesFromInputs(), MemImportLayer::ValidateTensorShapesFromInputs(), MergeLayer::ValidateTensorShapesFromInputs(), NormalizationLayer::ValidateTensorShapesFromInputs(), PadLayer::ValidateTensorShapesFromInputs(), PermuteLayer::ValidateTensorShapesFromInputs(), Pooling2dLayer::ValidateTensorShapesFromInputs(), Pooling3dLayer::ValidateTensorShapesFromInputs(), PreluLayer::ValidateTensorShapesFromInputs(), QLstmLayer::ValidateTensorShapesFromInputs(), QuantizedLstmLayer::ValidateTensorShapesFromInputs(), QuantizeLayer::ValidateTensorShapesFromInputs(), RankLayer::ValidateTensorShapesFromInputs(), ReduceLayer::ValidateTensorShapesFromInputs(), ReshapeLayer::ValidateTensorShapesFromInputs(), ResizeLayer::ValidateTensorShapesFromInputs(), ReverseV2Layer::ValidateTensorShapesFromInputs(), RsqrtLayer::ValidateTensorShapesFromInputs(), ScatterNdLayer::ValidateTensorShapesFromInputs(), ShapeLayer::ValidateTensorShapesFromInputs(), SliceLayer::ValidateTensorShapesFromInputs(), SoftmaxLayer::ValidateTensorShapesFromInputs(), SpaceToBatchNdLayer::ValidateTensorShapesFromInputs(), SpaceToDepthLayer::ValidateTensorShapesFromInputs(), SplitterLayer::ValidateTensorShapesFromInputs(), StackLayer::ValidateTensorShapesFromInputs(), StridedSliceLayer::ValidateTensorShapesFromInputs(), SwitchLayer::ValidateTensorShapesFromInputs(), TileLayer::ValidateTensorShapesFromInputs(), TransposeConvolution2dLayer::ValidateTensorShapesFromInputs(), TransposeLayer::ValidateTensorShapesFromInputs(), UnidirectionalSequenceLstmLayer::ValidateTensorShapesFromInputs(), ConvertConstDequantisationLayersToConstLayersImpl::~ConvertConstDequantisationLayersToConstLayersImpl(), ConvertConstPermuteLayersToConstLayers::~ConvertConstPermuteLayersToConstLayers(), PermuteAsReshapeImpl::~PermuteAsReshapeImpl(), TransposeAsReshapeImpl::~TransposeAsReshapeImpl(), and TurboConvertConstDequantisationLayersToConstLayersImpl::~TurboConvertConstDequantisationLayersToConstLayersImpl().

◆ HasMultipleQuantizationScales()

bool HasMultipleQuantizationScales ( ) const
inline

Definition at line 203 of file Tensor.hpp.

{ return m_Quantization.m_Scales.size() > 1; }

Referenced by GetQuantizationScale(), HasPerAxisQuantization(), armnn::IsMultiAxesReduceSupported(), and IsTypeSpaceMatch().

◆ HasPerAxisQuantization()

bool HasPerAxisQuantization ( ) const

Definition at line 446 of file Tensor.cpp.

{
    return HasMultipleQuantizationScales() || m_Quantization.m_QuantizationDim.has_value();
}

References HasMultipleQuantizationScales().

Referenced by armnn::Convert1HWOtoMIHW(), and armnnUtils::ToFloatArray().
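
A hedged sketch of branching on per-axis versus per-tensor parameters, following the behaviour shown above:

#include <armnn/Tensor.hpp>
#include <iostream>
#include <vector>

void InspectQuantization(const armnn::TensorInfo& info)
{
    if (info.HasPerAxisQuantization())
    {
        // One scale per slice along the axis reported by GetQuantizationDim().
        const std::vector<float> scales = info.GetQuantizationScales();
        std::cout << "per-axis, " << scales.size() << " scales\n";
    }
    else if (info.IsQuantized())
    {
        // Single per-tensor scale and offset.
        std::cout << "per-tensor, scale " << info.GetQuantizationScale() << "\n";
    }
}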

◆ IsConstant()

◆ IsQuantized()

bool IsQuantized ( ) const

◆ IsTypeSpaceMatch()

bool IsTypeSpaceMatch ( const TensorInfo & other) const

Check that the types are the same and, if quantized, that the quantization parameters are the same.

Definition at line 432 of file Tensor.cpp.

{
    bool match = true;

    match &= m_DataType == other.m_DataType;

    if (IsQuantized() && !HasMultipleQuantizationScales())
    {
        match &= GetQuantizationScale() == other.GetQuantizationScale() &&
                 GetQuantizationOffset() == other.GetQuantizationOffset();
    }
    return match;
}

References GetQuantizationOffset(), GetQuantizationScale(), HasMultipleQuantizationScales(), IsQuantized(), and TensorInfo().

Referenced by ClLayerSupport::IsConcatSupported(), NeonLayerSupport::IsConcatSupported(), ClLayerSupport::IsSplitterSupported(), and NeonLayerSupport::IsSplitterSupported().
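
An illustrative check (a sketch with made-up tensor descriptions) of the behaviour documented above, i.e. same data type and, for per-tensor quantized types, same scale and offset:

#include <armnn/Tensor.hpp>

bool CompareTypeSpaces()
{
    armnn::TensorInfo a(armnn::TensorShape({1, 8}), armnn::DataType::QAsymmU8, 0.1f, 10);
    armnn::TensorInfo b(armnn::TensorShape({4, 2}), armnn::DataType::QAsymmU8, 0.1f, 10);
    armnn::TensorInfo c(armnn::TensorShape({1, 8}), armnn::DataType::QAsymmU8, 0.2f, 10);

    // Shapes are ignored; only the data type and quantization parameters are compared.
    bool ab = a.IsTypeSpaceMatch(b);   // true
    bool ac = a.IsTypeSpaceMatch(c);   // false: scales differ
    return ab && !ac;
}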

◆ operator!=()

bool operator!= ( const TensorInfo & other) const

Definition at line 422 of file Tensor.cpp.

{
    return !(*this == other);
}

References TensorInfo().

◆ operator=()

TensorInfo & operator= ( const TensorInfo & other)

Definition at line 405 of file Tensor.cpp.

{
    m_Shape = other.m_Shape;
    m_DataType = other.m_DataType;
    m_Quantization = other.m_Quantization;
    m_IsConstant = other.m_IsConstant;
    return *this;
}

References TensorInfo().

◆ operator==()

bool operator== ( const TensorInfo & other) const

Definition at line 414 of file Tensor.cpp.

{
    return ((m_Shape == other.m_Shape) &&
            (m_DataType == other.m_DataType) &&
            (m_Quantization == other.m_Quantization) &&
            (m_IsConstant == other.m_IsConstant));
}

References TensorInfo().

◆ SetConstant()

void SetConstant ( const bool IsConstant = true)

◆ SetDataType()

◆ SetQuantizationDim()

void SetQuantizationDim ( const Optional< unsigned int > & quantizationDim)

Definition at line 503 of file Tensor.cpp.

{
    m_Quantization.m_QuantizationDim = quantizationDim;
}

Referenced by armnnUtils::Permuted(), TensorInfo(), and TensorInfo().

◆ SetQuantizationOffset()

void SetQuantizationOffset ( int32_t offset)

Definition at line 493 of file Tensor.cpp.

{
    m_Quantization.m_Offset = MakeOptional<int32_t>(offset);
}

References armnn::MakeOptional().

Referenced by armnn::IsMultiAxesReduceSupported(), TensorInfo(), and TensorInfo().

◆ SetQuantizationScale()

void SetQuantizationScale ( float scale)

Definition at line 477 of file Tensor.cpp.

{
    m_Quantization.m_Scales = { scale };
}

Referenced by armnn::IsMultiAxesReduceSupported(), TensorInfo(), and TensorInfo().
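
A brief sketch of updating the per-tensor quantization parameters on an existing TensorInfo (the values are illustrative):

#include <armnn/Tensor.hpp>

void ConfigureQuantization(armnn::TensorInfo& info)
{
    // Replace the single per-tensor scale and zero point after construction.
    info.SetQuantizationScale(0.02f);
    info.SetQuantizationOffset(64);
}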

◆ SetQuantizationScales()

void SetQuantizationScales ( const std::vector< float > & scales)

Definition at line 456 of file Tensor.cpp.

{
    m_Quantization.m_Scales = scales;
}

Referenced by armnn::IsMultiAxesReduceSupported(), TensorInfo(), and TensorInfo().

◆ SetShape()


The documentation for this class was generated from the following files:

Tensor.hpp
Tensor.cpp