ArmNN
 24.08
GpuFsaLayerSupport.cpp
Go to the documentation of this file.
1 //
2 // Copyright © 2022-2024 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
#include "GpuFsaLayerSupport.hpp"

#include <armnn/Types.hpp>
#include <armnn/utility/IgnoreUnused.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>

#if defined(ARMCOMPUTEGPUFSA_ENABLED)
#include "layers/GpuFsaActivation.hpp"
#include "layers/GpuFsaBatchMatMul.hpp"
#include "layers/GpuFsaCast.hpp"
#include "layers/GpuFsaConvolution2d.hpp"
#include "layers/GpuFsaDepthwiseConvolution2d.hpp"
#include "layers/GpuFsaElementwiseBinary.hpp"
#include "layers/GpuFsaPooling2d.hpp"
#include "layers/GpuFsaReshape.hpp"
#include "layers/GpuFsaResize.hpp"
#include "layers/GpuFsaSoftmax.hpp"
#endif

#include <vector>
26 
27 namespace armnn
28 {
29 
30 template<typename ... Args>
31 bool IsGpuFsaBackendSupported(Optional<std::string&> reasonIfUnsupported, Args... args)
32 {
33  IgnoreUnused(reasonIfUnsupported, (args)...);
34 #if defined(ARMCOMPUTEGPUFSA_ENABLED)
35  return true;
36 #else
37  if (reasonIfUnsupported)
38  {
39  reasonIfUnsupported.value() = "The armnn library has been built without CL support";
40  }
41  return false;
42 #endif
43 }
44 
// Evaluates a backend-support expression only when the GpuFsa backend is
// compiled in; otherwise collapses to the generic "backend not built" check.
// NOTE(review): the fallback expansion relies on a variable named
// 'reasonIfUnsupported' being in scope at every expansion site — verify
// call sites if this macro is used outside IsLayerSupported-style methods.
#if defined(ARMCOMPUTEGPUFSA_ENABLED)
#define FORWARD_GPUFSA_LAYER_SUPPORT_FUNC(expr) (expr)
#else
#define FORWARD_GPUFSA_LAYER_SUPPORT_FUNC(expr) IsGpuFsaBackendSupported(reasonIfUnsupported)
#endif
50 
51 #if defined(ARMCOMPUTEGPUFSA_ENABLED)
52 template<class FuncType, class... Args>
53 inline bool CheckIsLayerSupported(FuncType&& func, Optional<std::string&> reasonIfUnsupported, Args&&... args)
54 {
55  arm_compute::Status aclStatus = func(std::forward<Args>(args)...);
56  const bool supported = (aclStatus.error_code() == arm_compute::ErrorCode::OK);
57  if (!supported && reasonIfUnsupported)
58  {
59  reasonIfUnsupported.value() = aclStatus.error_description();
60  }
61  return supported;
62 }
63 
// Forwards to the given ACL validate function when the GpuFsa backend is
// compiled in; otherwise reports that the backend was not built.
// NOTE: the macro expands to a 'return' statement at the expansion site,
// so any code following an expansion in the same branch is unreachable.
#define FORWARD_LAYER_VALIDATE_FUNC(func, reasonIfUnsupported, ...) \
 return CheckIsLayerSupported(func, reasonIfUnsupported, __VA_ARGS__);
#else
#define FORWARD_LAYER_VALIDATE_FUNC(func, reasonIfUnsupported, ...) \
 return IsGpuFsaBackendSupported(reasonIfUnsupported, __VA_ARGS__);
#endif
70 
72  const std::vector<TensorInfo>& infos,
73  const BaseDescriptor& descriptor,
74  const Optional<LstmInputParamsInfo>& lstmParamsInfo,
75  const Optional<QuantizedLstmInputParamsInfo>& quantizedLstmInputParamsInfo,
76  Optional<std::string&> reasonIfUnsupported) const
77 {
78  IgnoreUnused(lstmParamsInfo);
79  IgnoreUnused(quantizedLstmInputParamsInfo);
80 
81  switch (type)
82  {
84  {
85  if (infos.size() != 2)
86  {
87  throw InvalidArgumentException("Invalid number of Activation TensorInfos. "
88  "TensorInfos should be of format: {input, output}.");
89  }
90 
91  auto desc = PolymorphicDowncast<const ActivationDescriptor*>(&descriptor);
93  reasonIfUnsupported,
94  infos[0],
95  *desc);
96  }
98  {
99  if (infos.size() != 3)
100  {
101  throw InvalidArgumentException("Invalid number of BatchMatMul TensorInfos. "
102  "TensorInfos should be of format: {input0, input1 output}.");
103  }
104 
105  auto desc = PolymorphicDowncast<const BatchMatMulDescriptor*>(&descriptor);
107  reasonIfUnsupported,
108  infos[0],
109  infos[1],
110  *desc);
111  }
112  case LayerType::Cast:
113  {
114  if (infos.size() != 2)
115  {
116  throw InvalidArgumentException("Invalid number of cast TensorInfos. "
117  "TensorInfos should be of format: {input, output}.");
118  }
119 
121  reasonIfUnsupported,
122  infos[0],
123  infos[1]);
124  }
126  {
127  if (infos.size() != 4)
128  {
129  throw InvalidArgumentException("Invalid number of Convolution2d TensorInfos. "
130  "TensorInfos should be of format: {input, output, weights, biases}.");
131  }
132 
133  auto desc = PolymorphicDowncast<const Convolution2dDescriptor*>(&descriptor);
134  if (infos[3] == TensorInfo())
135  {
137  reasonIfUnsupported,
138  infos[0],
139  *desc,
140  infos[2],
141  EmptyOptional());
142  }
143  else
144  {
146  reasonIfUnsupported,
147  infos[0],
148  *desc,
149  infos[2],
150  infos[3]);
151  }
152  }
154  {
155  if (infos.size() != 4)
156  {
157  throw InvalidArgumentException("Invalid number of DepthwiseConvolution2dDescriptor TensorInfos. "
158  "TensorInfos should be of format: {input, output, weights, biases}.");
159  }
160 
161  auto desc = PolymorphicDowncast<const DepthwiseConvolution2dDescriptor*>(&descriptor);
162  if (infos[3] == TensorInfo())
163  {
165  reasonIfUnsupported,
166  infos[0],
167  *desc,
168  infos[2],
169  EmptyOptional());
170  }
171  else
172  {
174  reasonIfUnsupported,
175  infos[0],
176  *desc,
177  infos[2],
178  infos[3]);
179  }
180  }
182  {
183  if (infos.size() != 3)
184  {
185  throw InvalidArgumentException("Invalid number of ElementwiseBinary TensorInfos. "
186  "TensorInfos should be of format: {input0, input1, output}.");
187  }
188 
189  auto desc = PolymorphicDowncast<const ElementwiseBinaryDescriptor*>(&descriptor);
191  reasonIfUnsupported,
192  infos[0],
193  infos[1],
194  *desc);
195  }
197  {
198  if (infos.size() != 2)
199  {
200  throw InvalidArgumentException("Invalid number of Pooling2d TensorInfos. "
201  "TensorInfos should be of format: {input, output}.");
202  }
203 
204  auto desc = PolymorphicDowncast<const Pooling2dDescriptor*>(&descriptor);
206  reasonIfUnsupported,
207  infos[0],
208  *desc);
209  }
210  case LayerType::Reshape:
211  {
212  if (infos.size() != 2)
213  {
214  throw InvalidArgumentException("Invalid number of Reshape TensorInfos. "
215  "TensorInfos should be of format: { input, output }.");
216  }
217 
218  auto desc = PolymorphicDowncast<const ReshapeDescriptor*>(&descriptor);
219 
221  reasonIfUnsupported,
222  infos[0],
223  *desc);
224  }
225  case LayerType::Resize:
226  {
227  if (infos.size() != 2)
228  {
229  throw InvalidArgumentException("Invalid number of Resize TensorInfos. "
230  "TensorInfos should be of format: {input, output}.");
231  }
232 
233  auto desc = PolymorphicDowncast<const ResizeDescriptor*>(&descriptor);
235  reasonIfUnsupported,
236  infos[0],
237  *desc);
238  }
239  case LayerType::Softmax:
240  {
241  if (infos.size() != 2)
242  {
243  throw InvalidArgumentException("Invalid number of Softmax TensorInfos. "
244  "TensorInfos should be of format: {input, output}.");
245  }
246 
247  auto desc = PolymorphicDowncast<const SoftmaxDescriptor*>(&descriptor);
249  reasonIfUnsupported,
250  infos[0],
251  infos[1],
252  *desc);
253  }
254  case LayerType::Constant:
255  case LayerType::Input:
256  case LayerType::Output:
257  return IsGpuFsaBackendSupported(reasonIfUnsupported, infos[0]);
258  default:
259  // Layers not supported in the GpuFsa backend.
260  return false;
261  }
262 }
263 
264 } // namespace armnn
armnn::GpuFsaCastValidate
arm_compute::Status GpuFsaCastValidate(const TensorInfo &input, const TensorInfo &output)
Definition: GpuFsaCast.cpp:33
armnn::Optional
Definition: Optional.hpp:270
FORWARD_LAYER_VALIDATE_FUNC
#define FORWARD_LAYER_VALIDATE_FUNC(func, reasonIfUnsupported,...)
Definition: GpuFsaLayerSupport.cpp:67
armnn::GpuFsaPooling2dValidate
arm_compute::Status GpuFsaPooling2dValidate(const TensorInfo &input, const Pooling2dDescriptor &descriptor)
Definition: GpuFsaPooling2d.cpp:22
armnn::TensorInfo
Definition: Tensor.hpp:152
armnn::IsGpuFsaBackendSupported
bool IsGpuFsaBackendSupported(Optional< std::string & > reasonIfUnsupported, Args... args)
Definition: GpuFsaLayerSupport.cpp:31
GpuFsaElementwiseBinary.hpp
IgnoreUnused.hpp
armnn::LayerType::ElementwiseBinary
@ ElementwiseBinary
armnn::GpuFsaLayerSupport::IsLayerSupported
bool IsLayerSupported(const LayerType &type, const std::vector< TensorInfo > &infos, const BaseDescriptor &descriptor, const Optional< LstmInputParamsInfo > &lstmParamsInfo, const Optional< QuantizedLstmInputParamsInfo > &, Optional< std::string & > reasonIfUnsupported) const override
Default implementation of the ILayerSupport interface; backends should implement this as a switch statement over the supported layer types.
Definition: GpuFsaLayerSupport.cpp:71
armnn::GpuFsaResizeValidate
arm_compute::Status GpuFsaResizeValidate(const TensorInfo &input, const ResizeDescriptor &descriptor)
Definition: GpuFsaResize.cpp:22
GpuFsaSoftmax.hpp
PolymorphicDowncast.hpp
armnn::EmptyOptional
EmptyOptional is used to initialize the Optional class when we want to have a default (empty) value for an Optional.
Definition: Optional.hpp:32
armnn::LayerType::Softmax
@ Softmax
GpuFsaConvolution2d.hpp
GpuFsaLayerSupport.hpp
armnn::InvalidArgumentException
Definition: Exceptions.hpp:80
armnn::GpuFsaReshapeValidate
arm_compute::Status GpuFsaReshapeValidate(const TensorInfo &input, const ReshapeDescriptor &descriptor)
Definition: GpuFsaReshape.cpp:22
armnn::GpuFsaActivationValidate
arm_compute::Status GpuFsaActivationValidate(const TensorInfo &input, const ActivationDescriptor &descriptor)
Definition: GpuFsaActivation.cpp:22
GpuFsaActivation.hpp
armnn::BaseDescriptor
Base class for all descriptors.
Definition: Descriptors.hpp:22
armnn::LayerType::Pooling2d
@ Pooling2d
GpuFsaResize.hpp
armnn::LayerType::BatchMatMul
@ BatchMatMul
GpuFsaDepthwiseConvolution2d.hpp
armnn::LayerType::DepthwiseConvolution2d
@ DepthwiseConvolution2d
armnn::LayerType::Cast
@ Cast
armnn::GpuFsaConvolution2dValidate
arm_compute::Status GpuFsaConvolution2dValidate(const TensorInfo &input, const Convolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases)
Definition: GpuFsaConvolution2d.cpp:24
armnn::Status
Status
Definition: Types.hpp:42
armnn::LayerType::Reshape
@ Reshape
armnn::IgnoreUnused
void IgnoreUnused(Ts &&...)
Definition: IgnoreUnused.hpp:14
GpuFsaCast.hpp
armnn
Copyright (c) 2021 ARM Limited and Contributors.
Definition: 01_00_quick_start.dox:6
Types.hpp
GpuFsaBatchMatMul.hpp
armnn::GpuFsaElementwiseBinaryValidate
arm_compute::Status GpuFsaElementwiseBinaryValidate(const TensorInfo &input0, const TensorInfo &input1, const ElementwiseBinaryDescriptor &descriptor)
Definition: GpuFsaElementwiseBinary.cpp:24
armnn::LayerType::Input
@ Input
armnn::LayerType::Resize
@ Resize
armnn::LayerType::Convolution2d
@ Convolution2d
armnn::GpuFsaDepthwiseConvolution2dValidate
arm_compute::Status GpuFsaDepthwiseConvolution2dValidate(const TensorInfo &input, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases)
Definition: GpuFsaDepthwiseConvolution2d.cpp:26
armnn::LayerType::Activation
@ Activation
GpuFsaReshape.hpp
armnn::GpuFsaSoftmaxValidate
arm_compute::Status GpuFsaSoftmaxValidate(const TensorInfo &input, const TensorInfo &output, const SoftmaxDescriptor &descriptor)
Definition: GpuFsaSoftmax.cpp:22
GpuFsaPooling2d.hpp
armnn::LayerType
LayerType
When adding a new layer, adapt also the LastLayer enum value in the enum class LayerType below.
Definition: Types.hpp:491
armnn::GpuFsaBatchMatMulValidate
arm_compute::Status GpuFsaBatchMatMulValidate(const TensorInfo &input0, const TensorInfo &input1, const BatchMatMulDescriptor &descriptor)
Definition: GpuFsaBatchMatMul.cpp:22
armnn::OptionalReferenceSwitch< std::is_reference< T >::value, T >::value
const T & value() const
Definition: Optional.hpp:146
armnn::LayerType::Output
@ Output
armnn::LayerType::Constant
@ Constant