ArmNN
 25.02
All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Pages
TensorInfo Class Reference

#include <Tensor.hpp>

Public Types

using DifferenceType = std::vector< TensorInfo >::difference_type
 

Public Member Functions

 TensorInfo ()
 Empty (invalid) constructor. More...
 
 TensorInfo (const TensorShape &shape, DataType dataType, float quantizationScale=1.0f, int32_t quantizationOffset=0, bool isConstant=false)
 
 TensorInfo (unsigned int numDimensions, const unsigned int *dimensionSizes, DataType dataType, float quantizationScale=1.0f, int32_t quantizationOffset=0, bool isConstant=false)
 
 TensorInfo (const TensorShape &shape, DataType dataType, const std::vector< float > &quantizationScales, unsigned int quantizationDim, bool isConstant=false)
 
 TensorInfo (unsigned int numDimensions, const unsigned int *dimensionSizes, DataType dataType, const std::vector< float > &quantizationScales, unsigned int quantizationDim, bool isConstant=false)
 
 TensorInfo (const TensorInfo &other)
 
TensorInfo & operator= (const TensorInfo &other)
 
bool operator== (const TensorInfo &other) const
 
bool operator!= (const TensorInfo &other) const
 
const TensorShape & GetShape () const
 
TensorShape & GetShape ()
 
void SetShape (const TensorShape &newShape)
 
unsigned int GetNumDimensions () const
 
unsigned int GetNumElements () const
 
DataType GetDataType () const
 
void SetDataType (DataType type)
 
bool HasMultipleQuantizationScales () const
 
bool HasPerAxisQuantization () const
 
std::vector< float > GetQuantizationScales () const
 
void SetQuantizationScales (const std::vector< float > &scales)
 
float GetQuantizationScale () const
 
void SetQuantizationScale (float scale)
 
int32_t GetQuantizationOffset () const
 
void SetQuantizationOffset (int32_t offset)
 
Optional< unsigned int > GetQuantizationDim () const
 
void SetQuantizationDim (const Optional< unsigned int > &quantizationDim)
 
bool IsQuantized () const
 
bool IsConstant () const
 
void SetConstant (const bool IsConstant=true)
 Marks the data corresponding to this tensor info as constant. More...
 
bool IsTypeSpaceMatch (const TensorInfo &other) const
 Check that the types are the same and, if quantized, that the quantization parameters are the same. More...
 
unsigned int GetNumBytes () const
 

Detailed Description

Definition at line 152 of file Tensor.hpp.

Member Typedef Documentation

◆ DifferenceType

using DifferenceType = std::vector<TensorInfo>::difference_type

Definition at line 191 of file Tensor.hpp.

Constructor & Destructor Documentation

◆ TensorInfo() [1/6]

Empty (invalid) constructor.

Definition at line 341 of file Tensor.cpp.

342 : m_DataType(DataType::Float32), m_IsConstant(false)
343 {
344 }

References armnn::Float32.

◆ TensorInfo() [2/6]

TensorInfo ( const TensorShape & shape,
DataType  dataType,
float  quantizationScale = 1.0f,
int32_t  quantizationOffset = 0,
bool  isConstant = false 
)

Definition at line 346 of file Tensor.cpp.

351  : m_Shape(shape)
352  , m_DataType(dataType)
353  , m_IsConstant(isConstant)
354 {
355  SetQuantizationScale(quantizationScale);
356  SetQuantizationOffset(quantizationOffset);
357 }
void SetQuantizationOffset(int32_t offset)
Definition: Tensor.cpp:493
void SetQuantizationScale(float scale)
Definition: Tensor.cpp:477

References TensorInfo::SetQuantizationOffset(), and TensorInfo::SetQuantizationScale().

◆ TensorInfo() [3/6]

TensorInfo ( unsigned int  numDimensions,
const unsigned int *  dimensionSizes,
DataType  dataType,
float  quantizationScale = 1.0f,
int32_t  quantizationOffset = 0,
bool  isConstant = false 
)

Definition at line 359 of file Tensor.cpp.

365  : m_Shape(numDimensions, dimensionSizes), m_DataType(dataType), m_IsConstant(isConstant)
366 {
367  SetQuantizationScale(quantizationScale);
368  SetQuantizationOffset(quantizationOffset);
369 }

References TensorInfo::SetQuantizationOffset(), and TensorInfo::SetQuantizationScale().

◆ TensorInfo() [4/6]

TensorInfo ( const TensorShape & shape,
DataType  dataType,
const std::vector< float > &  quantizationScales,
unsigned int  quantizationDim,
bool  isConstant = false 
)

Definition at line 371 of file Tensor.cpp.

376  : m_Shape(shape)
377  , m_DataType(dataType)
378  , m_IsConstant(isConstant)
379 {
380  SetQuantizationScales(quantizationScales);
381  SetQuantizationDim(MakeOptional<unsigned int>(quantizationDim));
382 }
void SetQuantizationScales(const std::vector< float > &scales)
Definition: Tensor.cpp:456
void SetQuantizationDim(const Optional< unsigned int > &quantizationDim)
Definition: Tensor.cpp:503

References TensorInfo::SetQuantizationDim(), and TensorInfo::SetQuantizationScales().

◆ TensorInfo() [5/6]

TensorInfo ( unsigned int  numDimensions,
const unsigned int *  dimensionSizes,
DataType  dataType,
const std::vector< float > &  quantizationScales,
unsigned int  quantizationDim,
bool  isConstant = false 
)

Definition at line 384 of file Tensor.cpp.

390  : m_Shape(numDimensions, dimensionSizes)
391  , m_DataType(dataType)
392  , m_IsConstant(isConstant)
393 {
394  SetQuantizationScales(quantizationScales);
395  SetQuantizationDim(MakeOptional<unsigned int>(quantizationDim));
396 }

References TensorInfo::SetQuantizationDim(), and TensorInfo::SetQuantizationScales().

◆ TensorInfo() [6/6]

TensorInfo ( const TensorInfo & other)

Definition at line 398 of file Tensor.cpp.

399 : m_Shape(other.m_Shape)
400 , m_DataType(other.m_DataType)
401 , m_IsConstant(other.m_IsConstant)
402 , m_Quantization(other.m_Quantization)
403 {}

Member Function Documentation

◆ GetDataType()

DataType GetDataType ( ) const
inline

Definition at line 200 of file Tensor.hpp.

200 { return m_DataType; }

Referenced by BiasAndWeightsTypesMatch::BiasAndWeightsTypesMatch(), armnnUtils::CalculateReducedOutputTensoInfo(), armnnUtils::CalculateStridedSliceOutputTensorInfo(), RefTensorHandle::CanBeImported(), TosaRefTensorHandle::CanBeImported(), armnn::ClCastValidate(), armnn::ClConvertFp16ToFp32WorkloadValidate(), armnn::ClConvertFp32ToFp16WorkloadValidate(), armnn::ClTileWorkloadValidate(), ConvertQuantizeToTosaOperator(), ConvertSoftmaxToTosaOperator(), RefGatherWorkload::Execute(), RefTileWorkload::Execute(), BaseTensor< MemoryType >::GetDataType(), Layer::GetDataType(), armnn::GetLayerInOutDatatype(), armnn::optimizations::pad_fold::GetLowestElement(), armnn::GetUnpaddedTensorStrides(), armnn::InitializeArmComputeTensorData(), armnn::InsertConvertFp16ToFp32LayersBefore(), armnn::InsertConvertFp32ToFp16LayersAfter(), SampleDynamicLayerSupport::IsAdditionSupported(), RefLayerSupport::IsConvertFp16ToFp32Supported(), RefLayerSupport::IsConvertFp32ToFp16Supported(), RefLayerSupport::IsConvolution2dSupported(), RefLayerSupport::IsConvolution3dSupported(), RefLayerSupport::IsDepthwiseConvolutionSupported(), NeonLayerSupport::IsFloorSupported(), ClLayerSupport::IsQLstmSupported(), NeonLayerSupport::IsQLstmSupported(), RefLayerSupport::IsTransposeConvolution2dSupported(), NeonLayerSupport::IsUnidirectionalSequenceLstmSupported(), armnn::LstmImpl(), armnn::NeonCastValidate(), armnn::NeonTileWorkloadValidate(), armnn::Pad(), armnn::PermuteTensor(), ConvertFp32NetworkToFp16Impl::Run(), armnnUtils::ToFloatArray(), ArgMinMaxQueueDescriptor::Validate(), FullyConnectedQueueDescriptor::Validate(), Convolution2dQueueDescriptor::Validate(), Convolution3dQueueDescriptor::Validate(), DepthwiseConvolution2dQueueDescriptor::Validate(), QuantizeQueueDescriptor::Validate(), EqualQueueDescriptor::Validate(), ConvertFp16ToFp32QueueDescriptor::Validate(), ConvertFp32ToFp16QueueDescriptor::Validate(), GreaterQueueDescriptor::Validate(), GatherNdQueueDescriptor::Validate(), 
GatherQueueDescriptor::Validate(), TransposeConvolution2dQueueDescriptor::Validate(), ComparisonQueueDescriptor::Validate(), LogicalBinaryQueueDescriptor::Validate(), ConvertConstDequantisationLayersToConstLayersImpl::~ConvertConstDequantisationLayersToConstLayersImpl(), ConvertConstPermuteLayersToConstLayers::~ConvertConstPermuteLayersToConstLayers(), and TurboConvertConstDequantisationLayersToConstLayersImpl::~TurboConvertConstDequantisationLayersToConstLayersImpl().

◆ GetNumBytes()

◆ GetNumDimensions()

unsigned int GetNumDimensions ( ) const
inline

Definition at line 197 of file Tensor.hpp.

197 { return m_Shape.GetNumDimensions(); }
unsigned int GetNumDimensions() const
Function that returns the tensor rank.
Definition: Tensor.cpp:174

References TensorShape::GetNumDimensions().

Referenced by armnn::ArgMinMax(), armnn::BatchToSpaceNd(), armnn::BuildAddMulAddTensorInfoLists(), armnn::CalculateGatherNdKeyIndices(), armnnUtils::CalculateReducedOutputTensoInfo(), armnnUtils::CalculateStridedSliceOutputTensorInfo(), armnn::CollapseLeadingUnitDimensions(), armnn::ComputeAclAxis(), armnn::ComputeReductionTensorShape(), armnn::ComputeSoftmaxAclAxis(), armnn::Concatenate(), armnn::ConnectedToLayerType(), ConvertSoftmaxToTosaOperator(), armnn::Gather(), armnn::GetNumActivations(), BaseTensor< MemoryType >::GetNumDimensions(), RefLayerSupport::IsMeanSupported(), armnn::LogSoftmax(), TfLiteParserImpl::OutputShapeOfSqueeze(), armnn::PrintOutput(), armnnUtils::ProcessConcatInputTensorInfo(), armnn::Reduce(), armnn::ReverseV2(), PermuteAndBatchToSpaceAsDepthToSpaceImpl< PermuteType >::Run(), AddBroadcastReshapeLayerImpl::Run(), armnn::ScatterNd(), armnn::Softmax(), armnn::SpaceToBatchNd(), armnn::Split(), armnn::Splitter(), armnn::Stack(), armnn::Tile(), FullyConnectedQueueDescriptor::Validate(), MeanQueueDescriptor::Validate(), PadQueueDescriptor::Validate(), InstanceNormalizationQueueDescriptor::Validate(), L2NormalizationQueueDescriptor::Validate(), SpaceToBatchNdQueueDescriptor::Validate(), StridedSliceQueueDescriptor::Validate(), GatherNdQueueDescriptor::Validate(), GatherQueueDescriptor::Validate(), SliceQueueDescriptor::Validate(), BatchMatMulQueueDescriptor::Validate(), ReverseV2QueueDescriptor::Validate(), TileQueueDescriptor::Validate(), QueueDescriptor::ValidateTensorNumDimensions(), ReduceLayer::ValidateTensorShapesFromInputs(), and ScatterNdLayer::ValidateTensorShapesFromInputs().

◆ GetNumElements()

◆ GetQuantizationDim()

Optional< unsigned int > GetQuantizationDim ( ) const

Definition at line 498 of file Tensor.cpp.

499 {
500  return m_Quantization.m_QuantizationDim;
501 }

Referenced by armnnUtils::ToFloatArray().

◆ GetQuantizationOffset()

int32_t GetQuantizationOffset ( ) const

◆ GetQuantizationScale()

float GetQuantizationScale ( ) const

Definition at line 461 of file Tensor.cpp.

462 {
463  if (m_Quantization.m_Scales.empty())
464  {
465  // NOTE: old default for backward compatibility
466  return 1.0f;
467  }
468 
469  if (HasMultipleQuantizationScales())
470  {
471  throw RuntimeException("Invalid call to GetQuantizationScale on a tensor with multiple scale values. Use "
472  "GetQuantizationScales instead.");
473  }
474  return m_Quantization.m_Scales[0];
475 }
bool HasMultipleQuantizationScales() const
Definition: Tensor.hpp:203

References TensorInfo::HasMultipleQuantizationScales().

Referenced by ConvertQuantizeToTosaOperator(), ConvertSoftmaxToTosaOperator(), armnn::optimizations::pad_fold::GetLowestElement(), armnn::IsMultiAxesReduceSupported(), TensorInfo::IsTypeSpaceMatch(), QuantizationParametersAreEqual::QuantizationParametersAreEqual(), armnnUtils::ToFloatArray(), QLstmQueueDescriptor::Validate(), and ConvertConstDequantisationLayersToConstLayersImpl::~ConvertConstDequantisationLayersToConstLayersImpl().

◆ GetQuantizationScales()

std::vector< float > GetQuantizationScales ( ) const

Definition at line 451 of file Tensor.cpp.

452 {
453  return m_Quantization.m_Scales;
454 }

Referenced by armnn::IsMultiAxesReduceSupported(), and armnnUtils::ToFloatArray().

◆ GetShape() [1/2]

TensorShape& GetShape ( )
inline

Definition at line 194 of file Tensor.hpp.

194 { return m_Shape; }

◆ GetShape() [2/2]

const TensorShape& GetShape ( ) const
inline

Definition at line 193 of file Tensor.hpp.

193 { return m_Shape; }

Referenced by armnn::ArgMinMax(), BatchMatMul::BatchMatMul(), armnn::BatchNormImpl(), armnn::BatchToSpaceNd(), armnn::CalculateGatherNdKeyIndices(), armnnUtils::CalculateReducedOutputTensoInfo(), armnnUtils::CalculateStridedSliceOutputTensorInfo(), armnn::ClUnidirectionalSequenceLstmFloatWorkloadValidate(), armnn::CollapseLeadingUnitDimensions(), armnn::Concatenate(), armnn::Convert1HWOTensorInfoToAcl(), armnn::Convert1HWOTensorToAcl(), armnn::Convert1HWOtoMIHW(), ConvertConstantToTosaOperator(), ConvertQuantizeToTosaOperator(), ConvertSoftmaxToTosaOperator(), armnn::ConvertWeightTensorFromArmnnToAcl(), armnn::CreateAclNormalizationLayerInfoForL2Normalization(), armnnOnnxParser::CreateConstTensorImpl(), armnn::DepthToSpace(), armnn::DetectionPostProcess(), armnn::ExecuteFunction(), armnn::Gather(), armnn::GetNumActivations(), BaseTensor< MemoryType >::GetShape(), ConstTensorHandle::GetShape(), RefTensorHandle::GetShape(), RefTensorHandleDecorator::GetShape(), TosaRefTensorHandle::GetShape(), SampleTensorHandle::GetShape(), armnn::GetUnpaddedTensorStrides(), armnn::InstanceNorm(), armnn::IsMultiAxesReduceSupported(), ClLayerSupport::IsSplitterSupported(), NeonLayerSupport::IsSplitterSupported(), armnn::LogSoftmax(), armnn::LstmImpl(), armnn::MirrorPad(), armnn::NeonUnidirectionalSequenceLstmFloatWorkloadValidate(), armnn::NeonUnidirectionalSequenceLstmWorkloadValidate(), TfLiteParserImpl::OutputShapeOfSqueeze(), armnn::Pad(), armnn::PermuteTensor(), armnn::Pooling2d(), armnn::Pooling3d(), armnn::PreluImpl(), armnn::PrintOutput(), armnnUtils::ProcessConcatInputTensorInfo(), armnn::Reduce(), armnnUtils::ReduceDims(), RefTransposeConvolution2dWorkload::RefTransposeConvolution2dWorkload(), armnn::ReshapeWeightsForAcl(), armnn::Resize(), armnn::ReverseV2(), FuseBatchNorm< ConvLayer, ArmnnType, T >::Run(), OptimizeConsecutiveReshapesImpl::Run(), AddBroadcastReshapeLayerImpl::Run(), PermuteAsReshapeImpl::Run(), TransposeAsReshapeImpl::Run(), armnn::ScatterNd(), 
Convolution2dLayer::SerializeLayerParameters(), Convolution3dLayer::SerializeLayerParameters(), DepthwiseConvolution2dLayer::SerializeLayerParameters(), Graph::SerializeToDot(), ShapesAreBroadcastCompatible::ShapesAreBroadcastCompatible(), ShapesAreSameRank::ShapesAreSameRank(), armnn::Slice(), armnn::Softmax(), armnn::SpaceToBatchNd(), armnn::SpaceToDepth(), armnn::Split(), armnn::Splitter(), armnn::Stack(), armnn::StridedSlice(), armnn::Tile(), armnnUtils::ToFloatArray(), ArgMinMaxQueueDescriptor::Validate(), PermuteQueueDescriptor::Validate(), DepthwiseConvolution2dQueueDescriptor::Validate(), DetectionPostProcessQueueDescriptor::Validate(), ResizeQueueDescriptor::Validate(), SpaceToBatchNdQueueDescriptor::Validate(), SpaceToDepthQueueDescriptor::Validate(), BatchToSpaceNdQueueDescriptor::Validate(), TransposeQueueDescriptor::Validate(), QLstmQueueDescriptor::Validate(), QuantizedLstmQueueDescriptor::Validate(), SliceQueueDescriptor::Validate(), DepthToSpaceQueueDescriptor::Validate(), BatchMatMulQueueDescriptor::Validate(), QueueDescriptor::ValidateTensorNumDimensions(), OutputSlot::ValidateTensorShape(), AbsLayer::ValidateTensorShapesFromInputs(), ActivationLayer::ValidateTensorShapesFromInputs(), ArgMinMaxLayer::ValidateTensorShapesFromInputs(), BatchMatMulLayer::ValidateTensorShapesFromInputs(), BatchNormalizationLayer::ValidateTensorShapesFromInputs(), BatchToSpaceNdLayer::ValidateTensorShapesFromInputs(), BroadcastToLayer::ValidateTensorShapesFromInputs(), CastLayer::ValidateTensorShapesFromInputs(), ChannelShuffleLayer::ValidateTensorShapesFromInputs(), ComparisonLayer::ValidateTensorShapesFromInputs(), ConcatLayer::ValidateTensorShapesFromInputs(), ConvertFp16ToFp32Layer::ValidateTensorShapesFromInputs(), ConvertFp32ToFp16Layer::ValidateTensorShapesFromInputs(), Convolution2dLayer::ValidateTensorShapesFromInputs(), Convolution3dLayer::ValidateTensorShapesFromInputs(), DebugLayer::ValidateTensorShapesFromInputs(), 
DepthToSpaceLayer::ValidateTensorShapesFromInputs(), DepthwiseConvolution2dLayer::ValidateTensorShapesFromInputs(), DequantizeLayer::ValidateTensorShapesFromInputs(), DetectionPostProcessLayer::ValidateTensorShapesFromInputs(), ElementwiseBaseLayer::ValidateTensorShapesFromInputs(), ElementwiseBinaryLayer::ValidateTensorShapesFromInputs(), ElementwiseUnaryLayer::ValidateTensorShapesFromInputs(), FakeQuantizationLayer::ValidateTensorShapesFromInputs(), FillLayer::ValidateTensorShapesFromInputs(), FloorLayer::ValidateTensorShapesFromInputs(), FullyConnectedLayer::ValidateTensorShapesFromInputs(), GatherLayer::ValidateTensorShapesFromInputs(), GatherNdLayer::ValidateTensorShapesFromInputs(), InstanceNormalizationLayer::ValidateTensorShapesFromInputs(), L2NormalizationLayer::ValidateTensorShapesFromInputs(), LogicalBinaryLayer::ValidateTensorShapesFromInputs(), LogSoftmaxLayer::ValidateTensorShapesFromInputs(), LstmLayer::ValidateTensorShapesFromInputs(), MeanLayer::ValidateTensorShapesFromInputs(), MemCopyLayer::ValidateTensorShapesFromInputs(), MemImportLayer::ValidateTensorShapesFromInputs(), MergeLayer::ValidateTensorShapesFromInputs(), NormalizationLayer::ValidateTensorShapesFromInputs(), PadLayer::ValidateTensorShapesFromInputs(), PermuteLayer::ValidateTensorShapesFromInputs(), Pooling2dLayer::ValidateTensorShapesFromInputs(), Pooling3dLayer::ValidateTensorShapesFromInputs(), PreluLayer::ValidateTensorShapesFromInputs(), QLstmLayer::ValidateTensorShapesFromInputs(), QuantizedLstmLayer::ValidateTensorShapesFromInputs(), QuantizeLayer::ValidateTensorShapesFromInputs(), RankLayer::ValidateTensorShapesFromInputs(), ReduceLayer::ValidateTensorShapesFromInputs(), ReshapeLayer::ValidateTensorShapesFromInputs(), ResizeLayer::ValidateTensorShapesFromInputs(), ReverseV2Layer::ValidateTensorShapesFromInputs(), RsqrtLayer::ValidateTensorShapesFromInputs(), ScatterNdLayer::ValidateTensorShapesFromInputs(), ShapeLayer::ValidateTensorShapesFromInputs(), 
SliceLayer::ValidateTensorShapesFromInputs(), SoftmaxLayer::ValidateTensorShapesFromInputs(), SpaceToBatchNdLayer::ValidateTensorShapesFromInputs(), SpaceToDepthLayer::ValidateTensorShapesFromInputs(), SplitterLayer::ValidateTensorShapesFromInputs(), StackLayer::ValidateTensorShapesFromInputs(), StridedSliceLayer::ValidateTensorShapesFromInputs(), SwitchLayer::ValidateTensorShapesFromInputs(), TileLayer::ValidateTensorShapesFromInputs(), TransposeConvolution2dLayer::ValidateTensorShapesFromInputs(), TransposeLayer::ValidateTensorShapesFromInputs(), UnidirectionalSequenceLstmLayer::ValidateTensorShapesFromInputs(), ConvertConstDequantisationLayersToConstLayersImpl::~ConvertConstDequantisationLayersToConstLayersImpl(), ConvertConstPermuteLayersToConstLayers::~ConvertConstPermuteLayersToConstLayers(), PermuteAsReshapeImpl::~PermuteAsReshapeImpl(), TransposeAsReshapeImpl::~TransposeAsReshapeImpl(), and TurboConvertConstDequantisationLayersToConstLayersImpl::~TurboConvertConstDequantisationLayersToConstLayersImpl().

◆ HasMultipleQuantizationScales()

bool HasMultipleQuantizationScales ( ) const
inline

Definition at line 203 of file Tensor.hpp.

203 { return m_Quantization.m_Scales.size() > 1; }

Referenced by TensorInfo::GetQuantizationScale(), TensorInfo::HasPerAxisQuantization(), armnn::IsMultiAxesReduceSupported(), and TensorInfo::IsTypeSpaceMatch().

◆ HasPerAxisQuantization()

bool HasPerAxisQuantization ( ) const

Definition at line 446 of file Tensor.cpp.

447 {
448  return HasMultipleQuantizationScales() || m_Quantization.m_QuantizationDim.has_value();
449 }

References TensorInfo::HasMultipleQuantizationScales().

Referenced by armnn::Convert1HWOtoMIHW(), and armnnUtils::ToFloatArray().

◆ IsConstant()

◆ IsQuantized()

bool IsQuantized ( ) const

◆ IsTypeSpaceMatch()

bool IsTypeSpaceMatch ( const TensorInfo & other) const

Check that the types are the same and, if quantized, that the quantization parameters are the same.

Definition at line 432 of file Tensor.cpp.

433 {
434  bool match = true;
435 
436  match &= m_DataType == other.m_DataType;
437 
438  if (IsQuantized() && !HasMultipleQuantizationScales())
439  {
440  match &= GetQuantizationScale() == other.GetQuantizationScale() &&
441  GetQuantizationOffset() == other.GetQuantizationOffset();
442  }
443  return match;
444 }
float GetQuantizationScale() const
Definition: Tensor.cpp:461
int32_t GetQuantizationOffset() const
Definition: Tensor.cpp:482
bool IsQuantized() const
Definition: Tensor.cpp:508

References TensorInfo::GetQuantizationOffset(), TensorInfo::GetQuantizationScale(), TensorInfo::HasMultipleQuantizationScales(), and TensorInfo::IsQuantized().

Referenced by ClLayerSupport::IsConcatSupported(), NeonLayerSupport::IsConcatSupported(), ClLayerSupport::IsSplitterSupported(), and NeonLayerSupport::IsSplitterSupported().

◆ operator!=()

bool operator!= ( const TensorInfo & other) const

Definition at line 422 of file Tensor.cpp.

423 {
424  return !(*this == other);
425 }

◆ operator=()

TensorInfo & operator= ( const TensorInfo & other)

Definition at line 405 of file Tensor.cpp.

406 {
407  m_Shape = other.m_Shape;
408  m_DataType = other.m_DataType;
409  m_Quantization = other.m_Quantization;
410  m_IsConstant = other.m_IsConstant;
411  return *this;
412 }

◆ operator==()

bool operator== ( const TensorInfo & other) const

Definition at line 414 of file Tensor.cpp.

415 {
416  return ((m_Shape == other.m_Shape) &&
417  (m_DataType == other.m_DataType) &&
418  (m_Quantization == other.m_Quantization) &&
419  (m_IsConstant == other.m_IsConstant));
420 }

◆ SetConstant()

void SetConstant ( const bool  IsConstant = true)

◆ SetDataType()

◆ SetQuantizationDim()

void SetQuantizationDim ( const Optional< unsigned int > &  quantizationDim)

Definition at line 503 of file Tensor.cpp.

504 {
505  m_Quantization.m_QuantizationDim = quantizationDim;
506 }

Referenced by armnnUtils::Permuted(), and TensorInfo::TensorInfo().

◆ SetQuantizationOffset()

void SetQuantizationOffset ( int32_t  offset)

Definition at line 493 of file Tensor.cpp.

494 {
495  m_Quantization.m_Offset = MakeOptional<int32_t>(offset);
496 }

Referenced by armnn::IsMultiAxesReduceSupported(), and TensorInfo::TensorInfo().

◆ SetQuantizationScale()

void SetQuantizationScale ( float  scale)

Definition at line 477 of file Tensor.cpp.

478 {
479  m_Quantization.m_Scales = { scale };
480 }

Referenced by armnn::IsMultiAxesReduceSupported(), and TensorInfo::TensorInfo().

◆ SetQuantizationScales()

void SetQuantizationScales ( const std::vector< float > &  scales)

Definition at line 456 of file Tensor.cpp.

457 {
458  m_Quantization.m_Scales = scales;
459 }

Referenced by armnn::IsMultiAxesReduceSupported(), and TensorInfo::TensorInfo().

◆ SetShape()


The documentation for this class was generated from the following files: