ArmNN
 24.08
LayerSupportRules.hpp
Go to the documentation of this file.
1 //
2 // Copyright © 2017, 2024 Arm Ltd. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
6 #pragma once
7 
8 #include <algorithm>
9 
10 namespace armnn
11 {
12 
14 {
15  if (!weightsType)
16  {
17  return weightsType;
18  }
19 
20  switch(weightsType.value())
21  {
24  return weightsType;
30  default:
31  throw InvalidArgumentException("GetBiasTypeFromWeightsType(): Unsupported data type.");
32  }
33  return armnn::EmptyOptional();
34 }
35 
36 template<typename F>
37 bool CheckSupportRule(F rule, Optional<std::string&> reasonIfUnsupported, const char* reason)
38 {
39  bool supported = rule();
40  if (!supported && reason)
41  {
42  reasonIfUnsupported.value() += std::string(reason) + "\n"; // Append the reason on a new line
43  }
44  return supported;
45 }
46 
/// Base class for the layer-support rules below. A derived rule's constructor
/// performs its check and stores the outcome in m_Res; the call operator then
/// exposes that outcome so CheckSupportRule() can evaluate rules uniformly.
struct Rule
{
    /// Outcome of the check; rules default to "supported".
    bool m_Res = true;

    /// Returns the outcome computed by the derived rule's constructor.
    bool operator()() const
    {
        return m_Res;
    }
};
56 
/// Recursion terminator for AllTypesAreEqualImpl: a single remaining tensor
/// has nothing left to compare against, so the chain of equalities holds.
template<typename T>
bool AllTypesAreEqualImpl(T)
{
    return true;
}
62 
63 template<typename T, typename... Rest>
64 bool AllTypesAreEqualImpl(T t1, T t2, Rest... rest)
65 {
66  static_assert(std::is_same<T, TensorInfo>::value, "Type T must be a TensorInfo");
67 
68  return (t1.GetDataType() == t2.GetDataType()) && AllTypesAreEqualImpl(t2, rest...);
69 }
70 
71 struct TypesAreEqual : public Rule
72 {
73  template<typename ... Ts>
74  TypesAreEqual(const Ts&... ts)
75  {
76  m_Res = AllTypesAreEqualImpl(ts...);
77  }
78 };
79 
81 {
83  {
84  m_Res = info0.GetQuantizationScale() == info1.GetQuantizationScale() &&
86  }
87 };
88 
89 struct TypeAnyOf : public Rule
90 {
91  template<typename Container>
92  TypeAnyOf(const TensorInfo& info, const Container& c)
93  {
94  m_Res = std::any_of(c.begin(), c.end(), [&info](DataType dt)
95  {
96  return dt == info.GetDataType();
97  });
98  }
99 };
100 
101 struct TypeIs : public Rule
102 {
103  TypeIs(const TensorInfo& info, DataType dt)
104  {
105  m_Res = dt == info.GetDataType();
106  }
107 };
108 
110 {
112  {
113  m_Res = !info.IsQuantized() || !info.HasPerAxisQuantization();
114  }
115 };
116 
118 {
119  BiasAndWeightsTypesMatch(const TensorInfo& biases, const TensorInfo& weights)
120  {
121  m_Res = biases.GetDataType() == GetBiasTypeFromWeightsType(weights.GetDataType()).value();
122  }
123 };
124 
126 {
127  template<typename Container>
128  BiasAndWeightsTypesCompatible(const TensorInfo& info, const Container& c)
129  {
130  m_Res = std::any_of(c.begin(), c.end(), [&info](DataType dt)
131  {
132  return dt == GetBiasTypeFromWeightsType(info.GetDataType()).value();
133  });
134  }
135 };
136 
137 struct ShapesAreSameRank : public Rule
138 {
139  ShapesAreSameRank(const TensorInfo& info0, const TensorInfo& info1)
140  {
141  m_Res = info0.GetShape().GetNumDimensions() == info1.GetShape().GetNumDimensions();
142  }
143 };
144 
146 {
147  ShapesAreSameTotalSize(const TensorInfo& info0, const TensorInfo& info1)
148  {
149  m_Res = info0.GetNumElements() == info1.GetNumElements();
150  }
151 };
152 
154 {
155  unsigned int CalcInputSize(const TensorShape& in, const TensorShape& out, unsigned int idx)
156  {
157  unsigned int offset = out.GetNumDimensions() - in.GetNumDimensions();
158  unsigned int sizeIn = (idx < offset) ? 1 : in[idx-offset];
159  return sizeIn;
160  }
161 
162  ShapesAreBroadcastCompatible(const TensorInfo& in0, const TensorInfo& in1, const TensorInfo& out)
163  {
164  const TensorShape& shape0 = in0.GetShape();
165  const TensorShape& shape1 = in1.GetShape();
166  const TensorShape& outShape = out.GetShape();
167 
168  for (unsigned int i=0; i < outShape.GetNumDimensions() && m_Res; i++)
169  {
170  unsigned int sizeOut = outShape[i];
171  unsigned int sizeIn0 = CalcInputSize(shape0, outShape, i);
172  unsigned int sizeIn1 = CalcInputSize(shape1, outShape, i);
173 
174  m_Res &= ((sizeIn0 == sizeOut) || (sizeIn0 == 1)) &&
175  ((sizeIn1 == sizeOut) || (sizeIn1 == 1));
176  }
177  }
178 };
179 
181 {
182  TensorNumDimensionsAreCorrect(const TensorInfo& info, unsigned int expectedNumDimensions)
183  {
184  m_Res = info.GetNumDimensions() == expectedNumDimensions;
185  }
186 };
187 
189 {
190  TensorNumDimensionsAreGreaterOrEqualTo(const TensorInfo& info, unsigned int numDimensionsToCompare)
191  {
192  m_Res = info.GetNumDimensions() >= numDimensionsToCompare;
193  }
194 };
195 
196 } //namespace armnn
armnn::Rule::m_Res
bool m_Res
Definition: LayerSupportRules.hpp:54
armnn::TensorInfo::GetNumElements
unsigned int GetNumElements() const
Definition: Tensor.hpp:198
armnn::Rule::operator()
bool operator()() const
Definition: LayerSupportRules.hpp:49
armnn::Optional
Definition: Optional.hpp:270
armnn::ShapesAreSameRank::ShapesAreSameRank
ShapesAreSameRank(const TensorInfo &info0, const TensorInfo &info1)
Definition: LayerSupportRules.hpp:139
armnn::BiasAndWeightsTypesMatch
Definition: LayerSupportRules.hpp:117
armnn::BiasAndWeightsTypesMatch::BiasAndWeightsTypesMatch
BiasAndWeightsTypesMatch(const TensorInfo &biases, const TensorInfo &weights)
Definition: LayerSupportRules.hpp:119
armnn::Rule
Definition: LayerSupportRules.hpp:47
armnn::TensorNumDimensionsAreGreaterOrEqualTo::TensorNumDimensionsAreGreaterOrEqualTo
TensorNumDimensionsAreGreaterOrEqualTo(const TensorInfo &info, unsigned int numDimensionsToCompare)
Definition: LayerSupportRules.hpp:190
armnn::TensorInfo::GetQuantizationScale
float GetQuantizationScale() const
Definition: Tensor.cpp:461
armnn::TensorInfo
Definition: Tensor.hpp:152
armnn::DataType::Float32
@ Float32
armnn::GetBiasTypeFromWeightsType
armnn::Optional< armnn::DataType > GetBiasTypeFromWeightsType(armnn::Optional< armnn::DataType > weightsType)
Definition: LayerSupportRules.hpp:13
armnn::DataType::QAsymmU8
@ QAsymmU8
armnn::DataType::QSymmS8
@ QSymmS8
armnn::QuantizationParametersAreEqual::QuantizationParametersAreEqual
QuantizationParametersAreEqual(const TensorInfo &info0, const TensorInfo &info1)
Definition: LayerSupportRules.hpp:82
armnn::DataType::QSymmS16
@ QSymmS16
armnn::ShapesAreSameRank
Definition: LayerSupportRules.hpp:137
armnn::TypeNotPerAxisQuantized
Definition: LayerSupportRules.hpp:109
armnn::TensorShape
Definition: Tensor.hpp:20
armnn::TensorNumDimensionsAreCorrect::TensorNumDimensionsAreCorrect
TensorNumDimensionsAreCorrect(const TensorInfo &info, unsigned int expectedNumDimensions)
Definition: LayerSupportRules.hpp:182
armnn::TypesAreEqual::TypesAreEqual
TypesAreEqual(const Ts &... ts)
Definition: LayerSupportRules.hpp:74
armnn::DataType::Float16
@ Float16
armnn::TensorShape::GetNumDimensions
unsigned int GetNumDimensions() const
Function that returns the tensor rank.
Definition: Tensor.cpp:174
armnn::ShapesAreBroadcastCompatible
Definition: LayerSupportRules.hpp:153
armnn::EmptyOptional
EmptyOptional is used to initialize the Optional class in case we want to have default value for an O...
Definition: Optional.hpp:32
armnn::DataType
DataType
Definition: Types.hpp:48
armnn::TensorNumDimensionsAreCorrect
Definition: LayerSupportRules.hpp:180
armnn::InvalidArgumentException
Definition: Exceptions.hpp:80
armnn::BiasAndWeightsTypesCompatible::BiasAndWeightsTypesCompatible
BiasAndWeightsTypesCompatible(const TensorInfo &info, const Container &c)
Definition: LayerSupportRules.hpp:128
armnn::BoostLogSeverityMapping::info
@ info
armnn::TensorInfo::GetDataType
DataType GetDataType() const
Definition: Tensor.hpp:200
armnn::AllTypesAreEqualImpl
bool AllTypesAreEqualImpl(T)
Definition: LayerSupportRules.hpp:58
armnn::TypeIs
Definition: LayerSupportRules.hpp:101
armnn::DataType::Signed32
@ Signed32
armnn::DataType::QAsymmS8
@ QAsymmS8
armnn::BiasAndWeightsTypesCompatible
Definition: LayerSupportRules.hpp:125
armnn::TensorInfo::GetShape
const TensorShape & GetShape() const
Definition: Tensor.hpp:193
armnn::ShapesAreBroadcastCompatible::CalcInputSize
unsigned int CalcInputSize(const TensorShape &in, const TensorShape &out, unsigned int idx)
Definition: LayerSupportRules.hpp:155
armnn
Copyright (c) 2021 ARM Limited and Contributors.
Definition: 01_00_quick_start.dox:6
armnn::TypesAreEqual
Definition: LayerSupportRules.hpp:71
armnn::ShapesAreBroadcastCompatible::ShapesAreBroadcastCompatible
ShapesAreBroadcastCompatible(const TensorInfo &in0, const TensorInfo &in1, const TensorInfo &out)
Definition: LayerSupportRules.hpp:162
armnn::CheckSupportRule
bool CheckSupportRule(F rule, Optional< std::string & > reasonIfUnsupported, const char *reason)
Definition: LayerSupportRules.hpp:37
armnn::TensorInfo::GetQuantizationOffset
int32_t GetQuantizationOffset() const
Definition: Tensor.cpp:482
armnn::TypeAnyOf
Definition: LayerSupportRules.hpp:89
armnn::TypeNotPerAxisQuantized::TypeNotPerAxisQuantized
TypeNotPerAxisQuantized(const TensorInfo &info)
Definition: LayerSupportRules.hpp:111
armnn::ShapesAreSameTotalSize::ShapesAreSameTotalSize
ShapesAreSameTotalSize(const TensorInfo &info0, const TensorInfo &info1)
Definition: LayerSupportRules.hpp:147
armnn::OptionalReferenceSwitch< std::is_reference< T >::value, T >::value
const T & value() const
Definition: Optional.hpp:146
armnn::QuantizationParametersAreEqual
Definition: LayerSupportRules.hpp:80
armnn::TypeAnyOf::TypeAnyOf
TypeAnyOf(const TensorInfo &info, const Container &c)
Definition: LayerSupportRules.hpp:92
armnn::ShapesAreSameTotalSize
Definition: LayerSupportRules.hpp:145
armnn::TensorNumDimensionsAreGreaterOrEqualTo
Definition: LayerSupportRules.hpp:188
armnn::TypeIs::TypeIs
TypeIs(const TensorInfo &info, DataType dt)
Definition: LayerSupportRules.hpp:103