ArmNN
 24.08
GpuFsaLayerSupport Class Reference

#include <GpuFsaLayerSupport.hpp>

Inheritance diagram for GpuFsaLayerSupport:
[legend]
Collaboration diagram for GpuFsaLayerSupport:
[legend]

Public Member Functions

bool IsLayerSupported (const LayerType &type, const std::vector< TensorInfo > &infos, const BaseDescriptor &descriptor, const Optional< LstmInputParamsInfo > &lstmParamsInfo, const Optional< QuantizedLstmInputParamsInfo > &, Optional< std::string & > reasonIfUnsupported) const override
 Default implementation of the ILayerSupport interface. Backends should implement this as a switch statement for each of their LayerTypes, calling their specific backend implementation of IsXXXLayerSupported. More...
 

Additional Inherited Members

- Protected Member Functions inherited from ILayerSupport
 ILayerSupport ()
 
virtual ~ILayerSupport ()
 

Detailed Description

Definition at line 13 of file GpuFsaLayerSupport.hpp.

Member Function Documentation

◆ IsLayerSupported()

bool IsLayerSupported ( const LayerType type,
const std::vector< TensorInfo > &  infos,
const BaseDescriptor descriptor,
const Optional< LstmInputParamsInfo > &  lstmParamsInfo,
const Optional< QuantizedLstmInputParamsInfo > &  quantizedLstmParamsInfo,
Optional< std::string & >  reasonIfUnsupported 
) const
overridevirtual

Default implementation of the ILayerSupport interface. Backends should implement this as a switch statement for each of their LayerTypes, calling their specific backend implementation of IsXXXLayerSupported.

Reimplemented from ILayerSupport.

Definition at line 71 of file GpuFsaLayerSupport.cpp.

77 {
78  IgnoreUnused(lstmParamsInfo);
79  IgnoreUnused(quantizedLstmInputParamsInfo);
80 
81  switch (type)
82  {
84  {
85  if (infos.size() != 2)
86  {
87  throw InvalidArgumentException("Invalid number of Activation TensorInfos. "
88  "TensorInfos should be of format: {input, output}.");
89  }
90 
91  auto desc = PolymorphicDowncast<const ActivationDescriptor*>(&descriptor);
93  reasonIfUnsupported,
94  infos[0],
95  *desc);
96  }
98  {
99  if (infos.size() != 3)
100  {
101  throw InvalidArgumentException("Invalid number of BatchMatMul TensorInfos. "
102  "TensorInfos should be of format: {input0, input1, output}.");
103  }
104 
105  auto desc = PolymorphicDowncast<const BatchMatMulDescriptor*>(&descriptor);
107  reasonIfUnsupported,
108  infos[0],
109  infos[1],
110  *desc);
111  }
112  case LayerType::Cast:
113  {
114  if (infos.size() != 2)
115  {
116  throw InvalidArgumentException("Invalid number of cast TensorInfos. "
117  "TensorInfos should be of format: {input, output}.");
118  }
119 
121  reasonIfUnsupported,
122  infos[0],
123  infos[1]);
124  }
126  {
127  if (infos.size() != 4)
128  {
129  throw InvalidArgumentException("Invalid number of Convolution2d TensorInfos. "
130  "TensorInfos should be of format: {input, output, weights, biases}.");
131  }
132 
133  auto desc = PolymorphicDowncast<const Convolution2dDescriptor*>(&descriptor);
134  if (infos[3] == TensorInfo())
135  {
137  reasonIfUnsupported,
138  infos[0],
139  *desc,
140  infos[2],
141  EmptyOptional());
142  }
143  else
144  {
146  reasonIfUnsupported,
147  infos[0],
148  *desc,
149  infos[2],
150  infos[3]);
151  }
152  }
154  {
155  if (infos.size() != 4)
156  {
157  throw InvalidArgumentException("Invalid number of DepthwiseConvolution2dDescriptor TensorInfos. "
158  "TensorInfos should be of format: {input, output, weights, biases}.");
159  }
160 
161  auto desc = PolymorphicDowncast<const DepthwiseConvolution2dDescriptor*>(&descriptor);
162  if (infos[3] == TensorInfo())
163  {
165  reasonIfUnsupported,
166  infos[0],
167  *desc,
168  infos[2],
169  EmptyOptional());
170  }
171  else
172  {
174  reasonIfUnsupported,
175  infos[0],
176  *desc,
177  infos[2],
178  infos[3]);
179  }
180  }
182  {
183  if (infos.size() != 3)
184  {
185  throw InvalidArgumentException("Invalid number of ElementwiseBinary TensorInfos. "
186  "TensorInfos should be of format: {input0, input1, output}.");
187  }
188 
189  auto desc = PolymorphicDowncast<const ElementwiseBinaryDescriptor*>(&descriptor);
191  reasonIfUnsupported,
192  infos[0],
193  infos[1],
194  *desc);
195  }
197  {
198  if (infos.size() != 2)
199  {
200  throw InvalidArgumentException("Invalid number of Pooling2d TensorInfos. "
201  "TensorInfos should be of format: {input, output}.");
202  }
203 
204  auto desc = PolymorphicDowncast<const Pooling2dDescriptor*>(&descriptor);
206  reasonIfUnsupported,
207  infos[0],
208  *desc);
209  }
210  case LayerType::Reshape:
211  {
212  if (infos.size() != 2)
213  {
214  throw InvalidArgumentException("Invalid number of Reshape TensorInfos. "
215  "TensorInfos should be of format: { input, output }.");
216  }
217 
218  auto desc = PolymorphicDowncast<const ReshapeDescriptor*>(&descriptor);
219 
221  reasonIfUnsupported,
222  infos[0],
223  *desc);
224  }
225  case LayerType::Resize:
226  {
227  if (infos.size() != 2)
228  {
229  throw InvalidArgumentException("Invalid number of Resize TensorInfos. "
230  "TensorInfos should be of format: {input, output}.");
231  }
232 
233  auto desc = PolymorphicDowncast<const ResizeDescriptor*>(&descriptor);
235  reasonIfUnsupported,
236  infos[0],
237  *desc);
238  }
239  case LayerType::Softmax:
240  {
241  if (infos.size() != 2)
242  {
243  throw InvalidArgumentException("Invalid number of Softmax TensorInfos. "
244  "TensorInfos should be of format: {input, output}.");
245  }
246 
247  auto desc = PolymorphicDowncast<const SoftmaxDescriptor*>(&descriptor);
249  reasonIfUnsupported,
250  infos[0],
251  infos[1],
252  *desc);
253  }
254  case LayerType::Constant:
255  case LayerType::Input:
256  case LayerType::Output:
257  return IsGpuFsaBackendSupported(reasonIfUnsupported, infos[0]);
258  default:
259  // Layers not supported in the GpuFsa backend.
260  return false;
261  }
262 }

References armnn::Activation, armnn::BatchMatMul, armnn::Cast, armnn::Constant, armnn::Convolution2d, armnn::DepthwiseConvolution2d, armnn::ElementwiseBinary, FORWARD_LAYER_VALIDATE_FUNC, armnn::GpuFsaActivationValidate(), armnn::GpuFsaBatchMatMulValidate(), armnn::GpuFsaCastValidate(), armnn::GpuFsaConvolution2dValidate(), armnn::GpuFsaDepthwiseConvolution2dValidate(), armnn::GpuFsaElementwiseBinaryValidate(), armnn::GpuFsaPooling2dValidate(), armnn::GpuFsaReshapeValidate(), armnn::GpuFsaResizeValidate(), armnn::GpuFsaSoftmaxValidate(), armnn::IgnoreUnused(), armnn::Input, armnn::IsGpuFsaBackendSupported(), armnn::Output, armnn::Pooling2d, armnn::Reshape, armnn::Resize, and armnn::Softmax.


The documentation for this class was generated from the following files:
GpuFsaLayerSupport.hpp
GpuFsaLayerSupport.cpp
armnn::GpuFsaCastValidate
arm_compute::Status GpuFsaCastValidate(const TensorInfo &input, const TensorInfo &output)
Definition: GpuFsaCast.cpp:33
FORWARD_LAYER_VALIDATE_FUNC
#define FORWARD_LAYER_VALIDATE_FUNC(func, reasonIfUnsupported,...)
Definition: GpuFsaLayerSupport.cpp:67
armnn::GpuFsaPooling2dValidate
arm_compute::Status GpuFsaPooling2dValidate(const TensorInfo &input, const Pooling2dDescriptor &descriptor)
Definition: GpuFsaPooling2d.cpp:22
armnn::IsGpuFsaBackendSupported
bool IsGpuFsaBackendSupported(Optional< std::string & > reasonIfUnsupported, Args... args)
Definition: GpuFsaLayerSupport.cpp:31
armnn::LayerType::ElementwiseBinary
@ ElementwiseBinary
armnn::GpuFsaResizeValidate
arm_compute::Status GpuFsaResizeValidate(const TensorInfo &input, const ResizeDescriptor &descriptor)
Definition: GpuFsaResize.cpp:22
armnn::LayerType::Softmax
@ Softmax
armnn::GpuFsaReshapeValidate
arm_compute::Status GpuFsaReshapeValidate(const TensorInfo &input, const ReshapeDescriptor &descriptor)
Definition: GpuFsaReshape.cpp:22
armnn::GpuFsaActivationValidate
arm_compute::Status GpuFsaActivationValidate(const TensorInfo &input, const ActivationDescriptor &descriptor)
Definition: GpuFsaActivation.cpp:22
armnn::LayerType::Pooling2d
@ Pooling2d
armnn::LayerType::BatchMatMul
@ BatchMatMul
armnn::LayerType::DepthwiseConvolution2d
@ DepthwiseConvolution2d
armnn::LayerType::Cast
@ Cast
armnn::GpuFsaConvolution2dValidate
arm_compute::Status GpuFsaConvolution2dValidate(const TensorInfo &input, const Convolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases)
Definition: GpuFsaConvolution2d.cpp:24
armnn::LayerType::Reshape
@ Reshape
armnn::IgnoreUnused
void IgnoreUnused(Ts &&...)
Definition: IgnoreUnused.hpp:14
armnn::GpuFsaElementwiseBinaryValidate
arm_compute::Status GpuFsaElementwiseBinaryValidate(const TensorInfo &input0, const TensorInfo &input1, const ElementwiseBinaryDescriptor &descriptor)
Definition: GpuFsaElementwiseBinary.cpp:24
armnn::LayerType::Input
@ Input
armnn::LayerType::Resize
@ Resize
armnn::LayerType::Convolution2d
@ Convolution2d
armnn::GpuFsaDepthwiseConvolution2dValidate
arm_compute::Status GpuFsaDepthwiseConvolution2dValidate(const TensorInfo &input, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases)
Definition: GpuFsaDepthwiseConvolution2d.cpp:26
armnn::LayerType::Activation
@ Activation
armnn::GpuFsaSoftmaxValidate
arm_compute::Status GpuFsaSoftmaxValidate(const TensorInfo &input, const TensorInfo &output, const SoftmaxDescriptor &descriptor)
Definition: GpuFsaSoftmax.cpp:22
armnn::GpuFsaBatchMatMulValidate
arm_compute::Status GpuFsaBatchMatMulValidate(const TensorInfo &input0, const TensorInfo &input1, const BatchMatMulDescriptor &descriptor)
Definition: GpuFsaBatchMatMul.cpp:22
armnn::LayerType::Output
@ Output
armnn::LayerType::Constant
@ Constant