ArmNN
 24.08
GpuFsaConvolution2d.cpp
Go to the documentation of this file.
1 //
2 // Copyright © 2024 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
#include "GpuFsaConvolution2d.hpp"

#include "UtilsGpuFsa.hpp"

#include <aclCommon/ArmComputeTensorUtils.hpp>

#include <arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadContext.h>
#include <arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadSketch.h>
#include <arm_compute/dynamic_fusion/sketch/gpu/operators/GpuConv2d.h>
#include <arm_compute/dynamic_fusion/sketch/gpu/operators/GpuOutput.h>

#include <vector>
17 
18 using namespace arm_compute::experimental::dynamic_fusion;
19 using namespace armnn::armcomputetensorutils;
20 
21 namespace armnn
22 {
23 
25  const Convolution2dDescriptor& descriptor,
26  const TensorInfo& weights,
27  const Optional<TensorInfo>& biases)
28 {
29  // Create a new workload sketch, for validation purposes
30  auto compileCtx = arm_compute::CLKernelLibrary::get().get_compile_context();
31  auto workloadContext = GpuWorkloadContext(&compileCtx);
32  GpuWorkloadSketch sketch{ &workloadContext };
33 
34  // Build and create tensor infos using the sketch
35  const arm_compute::TensorInfo aclInputInfo = BuildArmComputeTensorInfo(input, descriptor.m_DataLayout);
36  arm_compute::TensorInfo aclWeightsInfo = BuildArmComputeTensorInfo(weights, descriptor.m_DataLayout);
37  aclWeightsInfo.set_are_values_constant(weights.IsConstant());
38 
39  auto inputInfo = workloadContext.create_tensor_info(aclInputInfo);
40  auto weightInfo = workloadContext.create_tensor_info(aclWeightsInfo);
41 
42  // Only create the bias tensor info if enabled, otherwise pass nullptr to validate_op
43  arm_compute::TensorInfo aclBiasInfo;
44  arm_compute::ITensorInfo* biasSketchInfoPtr = nullptr;
45 
46  if (descriptor.m_BiasEnabled)
47  {
48  if(!biases.has_value())
49  {
50  throw InvalidArgumentException("GpuFsaConvolution2d::ValidateOp: No biases set when biases are enabled");
51  }
52  aclBiasInfo = BuildArmComputeTensorInfo(biases.value(), descriptor.m_DataLayout);
53  aclBiasInfo.set_are_values_constant(biases.value().IsConstant());
54 
55  biasSketchInfoPtr = workloadContext.create_tensor_info(aclBiasInfo);
56  }
57 
58  Conv2dAttributes conv2dAttributes = CreateConv2dAttributes(descriptor);
59 
60  // Validate operator, check status and update reasonIfUnsupported
61  arm_compute::Status aclStatus = GpuConv2d::validate_op(sketch,
62  inputInfo,
63  weightInfo,
64  biasSketchInfoPtr,
65  conv2dAttributes);
66 
67  return aclStatus;
68 }
69 
71  const TensorInfo& input,
72  const Convolution2dDescriptor& descriptor,
73  const TensorInfo& weights,
74  const Optional<TensorInfo>& biases)
75 {
76 /*
77  * Creating an Op for the GpuFsa backend requires us to create and maintain quite a bit of data, which is then stored
78  * in a GpuFsaPreCompiledBlob for execution later. Specifically we need:
79  * GpuWorkloadContext, this contains the TensorInfos and is unique to the Graph being executed
80  * Sketch, this is similar to a subgraph and can contain one or more operations. Multiple ops can be "fused" together
81  * using a single sketch.
82  * The inputTensorinfos / outputTensorInfos, these are pointers to the TensorInfos used when creating the sketch.
83  * They refer to the TensorInfos stored within the GpuWorkloadContext and are needed when executing the sketch
84  * as the TensorInfos used when creating the Tensors must match those used to create the Sketch. Otherwise the runtime
85  * doesn't know which Tensors to use.
86  */
87  GpuWorkloadSketch* sketch = blob->sketch.get();
88  GpuWorkloadContext* workloadContext = blob->workloadContext.get();
89  std::vector<arm_compute::ITensorInfo*> inputTensorInfos = {};
90  std::vector<arm_compute::ITensorInfo*> outputTensorInfos = {};
91 
92  // Build and create tensor infos using the sketch
93  const arm_compute::TensorInfo aclInputInfo = BuildArmComputeTensorInfo(input, descriptor.m_DataLayout);
94  arm_compute::TensorInfo aclWeightsInfo = BuildArmComputeTensorInfo(weights, descriptor.m_DataLayout);
95  aclWeightsInfo.set_are_values_constant(weights.IsConstant());
96 
97  inputTensorInfos.emplace_back(workloadContext->create_tensor_info(aclInputInfo));
98  inputTensorInfos.emplace_back(workloadContext->create_tensor_info(aclWeightsInfo));
99 
100  // Only create the bias tensor info if enabled, otherwise pass nullptr to validate_op / create_op
101  arm_compute::TensorInfo aclBiasInfo;
102  arm_compute::ITensorInfo* biasSketchInfoPtr = nullptr;
103 
104  if (descriptor.m_BiasEnabled)
105  {
106  if(!biases.has_value())
107  {
108  throw InvalidArgumentException("GpuFsaConvolution2d::CreateOp: No biases set when biases are enabled");
109  }
110  aclBiasInfo = BuildArmComputeTensorInfo(biases.value(), descriptor.m_DataLayout);
111  aclBiasInfo.set_are_values_constant(biases.value().IsConstant());
112 
113  inputTensorInfos.emplace_back(workloadContext->create_tensor_info(aclBiasInfo));
114  biasSketchInfoPtr = inputTensorInfos[2];
115  }
116 
117  Conv2dAttributes conv2dAttributes = CreateConv2dAttributes(descriptor);
118 
119  // Validate operator, check status and update reasonIfUnsupported
120  arm_compute::Status aclStatus = GpuConv2d::validate_op(*sketch,
121  inputTensorInfos[0],
122  inputTensorInfos[1],
123  biasSketchInfoPtr,
124  conv2dAttributes);
125 
126  const bool supported = (aclStatus.error_code() == arm_compute::ErrorCode::OK);
127  if (!supported)
128  {
129  throw BackendCapabilityException("\"GpuFsa\" backend failed during Convolution2D operation validation");
130  }
131 
132  // Create the Op within the Sketch using the TensorInfos we have stored
133  arm_compute::ITensorInfo* convOutInfo = GpuConv2d::create_op(*sketch,
134  inputTensorInfos[0],
135  inputTensorInfos[1],
136  biasSketchInfoPtr,
137  conv2dAttributes);
138 
139  // Create the Output
140  outputTensorInfos.emplace_back(workloadContext->create_tensor_info());
141  GpuOutput::create_op(*sketch, convOutInfo, outputTensorInfos[0]);
142 
143  // Store the TensorInfos within the blob as unique_ptrs to be used later
144  blob->inputTensorInfos = std::make_unique<std::vector<arm_compute::ITensorInfo*>>(inputTensorInfos);
145  blob->outputTensorInfos = std::make_unique<std::vector<arm_compute::ITensorInfo*>>(outputTensorInfos);
146 }
147 
148 } // namespace armnn
armnn::Optional
Definition: Optional.hpp:270
armnn::TensorInfo
Definition: Tensor.hpp:152
armnn::TensorInfo::IsConstant
bool IsConstant() const
Definition: Tensor.cpp:513
armnn::GpuFsaPreCompiledBlob::inputTensorInfos
std::unique_ptr< std::vector< arm_compute::ITensorInfo * > > inputTensorInfos
Definition: GpuFsaBackend.hpp:37
CreateConv2dAttributes
Conv2dAttributes CreateConv2dAttributes(const Convolution2dDescriptor &descriptor)
Utility function used to setup an arm_compute::Conv2dAttributes object from given descriptor.
Definition: UtilsGpuFsa.cpp:14
armnn::Convolution2dDescriptor::m_BiasEnabled
bool m_BiasEnabled
Enable/disable bias.
Definition: Descriptors.hpp:582
armnn::GpuFsaPreCompiledBlob::sketch
std::unique_ptr< arm_compute::experimental::dynamic_fusion::GpuWorkloadSketch > sketch
Definition: GpuFsaBackend.hpp:34
GpuFsaConvolution2d.hpp
armnn::InvalidArgumentException
Definition: Exceptions.hpp:80
armnn::Convolution2dDescriptor::m_DataLayout
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
Definition: Descriptors.hpp:584
armnn::Convolution2dDescriptor
A Convolution2dDescriptor for the Convolution2dLayer.
Definition: Descriptors.hpp:534
armnn::GpuFsaConvolution2dValidate
arm_compute::Status GpuFsaConvolution2dValidate(const TensorInfo &input, const Convolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases)
Definition: GpuFsaConvolution2d.cpp:24
armnn::Status
Status
Definition: Types.hpp:42
armnn::BackendCapabilityException
Definition: Exceptions.hpp:152
armnn
Copyright (c) 2021 ARM Limited and Contributors.
Definition: 01_00_quick_start.dox:6
ArmComputeTensorUtils.hpp
armnn::GpuFsaPreCompiledBlob::workloadContext
std::shared_ptr< arm_compute::experimental::dynamic_fusion::GpuWorkloadContext > workloadContext
Definition: GpuFsaBackend.hpp:35
armnn::GpuFsaPreCompiledBlob
A structure which contains all the elements needed to execute a fused workload in the GpuFsa Backend.
Definition: GpuFsaBackend.hpp:32
UtilsGpuFsa.hpp
armnn::GpuFsaConvolution2dCreateOp
void GpuFsaConvolution2dCreateOp(GpuFsaPreCompiledBlob *blob, const TensorInfo &input, const Convolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases)
Definition: GpuFsaConvolution2d.cpp:70
armnn::GpuFsaPreCompiledBlob::outputTensorInfos
std::unique_ptr< std::vector< arm_compute::ITensorInfo * > > outputTensorInfos
Definition: GpuFsaBackend.hpp:38
armnn::OptionalReferenceSwitch< std::is_reference< T >::value, T >::value
const T & value() const
Definition: Optional.hpp:146
armnn::OptionalBase::has_value
bool has_value() const noexcept
Definition: Optional.hpp:53