ArmNN 25.11
NeonDepthwiseConvolutionWorkload.cpp
//
// Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "NeonDepthwiseConvolutionWorkload.hpp"

#include "NeonWorkloadUtils.hpp"

#include <aclCommon/ArmComputeTensorUtils.hpp>
#include <aclCommon/ArmComputeUtils.hpp>

#include <armnnUtils/DataLayoutIndexed.hpp>

#include <armnn/utility/PolymorphicDowncast.hpp>

#include <arm_compute/runtime/NEON/functions/NEDepthwiseConvolutionLayer.h>

using namespace armnnUtils;

namespace armnn
{

using namespace armcomputetensorutils;

arm_compute::Status NeonDepthwiseConvolutionWorkloadValidate(const TensorInfo& input,
                                                             const TensorInfo& output,
                                                             const DepthwiseConvolution2dDescriptor& descriptor,
                                                             const TensorInfo& weights,
                                                             const Optional<TensorInfo>& biases,
                                                             const ActivationDescriptor* activationDescriptor)
{
    const arm_compute::TensorInfo aclInputInfo  = BuildArmComputeTensorInfo(input,  descriptor.m_DataLayout);
    const arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output, descriptor.m_DataLayout);

    // ArmNN format for weights for depthwise is [1, H, W, C] independently of the input/output layout
    //
    // ACL format for weights for depthwise is:
    // - [1, H, W, C] for [N, H, W, C] input/output layout (matches with ArmNN)
    // - [1, C, H, W] for [N, C, H, W] input/output layout
    //
    // Therefore ArmNN weights have to be permuted when the input/output layout is [N, C, H, W] to pass them to ACL.
    // The PermuteDepthwiseConv2dWeights backend optimization takes care of this, but it has not been performed yet,
    // so we do the permute here for the TensorInfo weights.
    unsigned int aclDepthMultiplier;
    TensorInfo weightsPermuted;
    std::tie(weightsPermuted, aclDepthMultiplier) = Convert1HWOTensorInfoToAcl(weights, input, descriptor.m_DataLayout);
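    // Illustrative example (shapes assumed, not from the source): for an NCHW input of shape
    // [N, 16, Hin, Win] and ArmNN weights of shape [1, Hf, Wf, 32], the weights TensorInfo is
    // permuted to [1, 32, Hf, Wf] and aclDepthMultiplier becomes 32 / 16 = 2.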

    // Convert the weights into the compute library format
    arm_compute::TensorInfo aclWeightsInfo = BuildArmComputeTensorInfo(weightsPermuted, descriptor.m_DataLayout);
    aclWeightsInfo.set_are_values_constant(weights.IsConstant());

    arm_compute::TensorInfo aclBiasesInfo;
    arm_compute::TensorInfo* optionalAclBiasesInfo = nullptr;
    if (descriptor.m_BiasEnabled)
    {
        if (!biases.has_value())
        {
            return arm_compute::Status{arm_compute::ErrorCode::RUNTIME_ERROR,
                                       "ArmNN NeonDepthwiseConvolutionWorkload has empty bias value."};
        }
        aclBiasesInfo = BuildArmComputeTensorInfo(biases.value(), descriptor.m_DataLayout);
        aclBiasesInfo.set_are_values_constant(biases.value().IsConstant());
        optionalAclBiasesInfo = &aclBiasesInfo;
    }

    const arm_compute::PadStrideInfo aclPadStrideInfo = BuildArmComputePadStrideInfo(descriptor);
    const arm_compute::Size2D aclDilationInfo = BuildArmComputeSize2D(descriptor.m_DilationX,
                                                                      descriptor.m_DilationY);

    const arm_compute::ActivationLayerInfo activationInfo = ConvertActivationDescriptorToAclActivationLayerInfo(
            activationDescriptor);

    return arm_compute::NEDepthwiseConvolutionLayer::validate(&aclInputInfo,
                                                              &aclWeightsInfo,
                                                              optionalAclBiasesInfo,
                                                              &aclOutputInfo,
                                                              aclPadStrideInfo,
                                                              aclDepthMultiplier,
                                                              activationInfo,
                                                              aclDilationInfo);
}

NeonDepthwiseConvolutionWorkload::NeonDepthwiseConvolutionWorkload(
    const DepthwiseConvolution2dQueueDescriptor& descriptor,
    const WorkloadInfo& info)
    : NeonBaseWorkload<DepthwiseConvolution2dQueueDescriptor>(descriptor, info)
{
    arm_compute::ITensor& input = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
    arm_compute::ITensor& output = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
    arm_compute::ITensor& weights = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[1])->GetTensor();
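    // Marking the weights (and, below, the biases) as constant tells ACL that their values will not
    // change between runs; in general this lets ACL do any weight reordering once up front
    // (general ACL behaviour, assumed here).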
    weights.info()->set_are_values_constant(info.m_InputTensorInfos[1].IsConstant());
    arm_compute::ITensor* biasesPtr = nullptr;
    if (m_Data.m_Parameters.m_BiasEnabled)
    {
        biasesPtr = &PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[2])->GetTensor();
        biasesPtr->info()->set_are_values_constant(info.m_InputTensorInfos[2].IsConstant());
    }

    arm_compute::TensorShape weightsShape = weights.info()->tensor_shape();
    arm_compute::TensorShape inputShape = input.info()->tensor_shape();

    // The PermuteDepthwiseConv2dWeights backend optimization has been performed,
    // converting the weights to have the same data layout as the input.
    unsigned int depthMultiplier =
        ComputeDepthwiseConv2dDepthMultiplier(m_Data.m_Parameters.m_DataLayout, weightsShape, inputShape);
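    // Illustrative example (shapes assumed): for an NHWC input of shape [1, 64, 64, 8] and weights
    // already permuted to [1, 3, 3, 16], the depth multiplier is 16 / 8 = 2.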

    const arm_compute::Size2D aclDilationInfo = BuildArmComputeSize2D(m_Data.m_Parameters.m_DilationX,
                                                                      m_Data.m_Parameters.m_DilationY);

    uint32_t numInputs = m_Data.m_Parameters.m_BiasEnabled ? 3 : 2;
    m_Data.ValidateInputsOutputs("NeonDepthwiseConvolutionWorkload", numInputs, 1);

    arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout);
    input.info()->set_data_layout(aclDataLayout);
    weights.info()->set_data_layout(aclDataLayout);
    output.info()->set_data_layout(aclDataLayout);

    const arm_compute::PadStrideInfo padStrideInfo = BuildArmComputePadStrideInfo(m_Data.m_Parameters);

    const arm_compute::ActivationLayerInfo activationInfo = ConvertAdditionalInfoToAclActivationLayerInfo(descriptor);
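    // A fused activation, if present, is carried in the queue descriptor's additional info; passing
    // the resulting ActivationLayerInfo to configure() below lets ACL apply it as part of the
    // convolution rather than as a separate layer.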

    m_pDepthwiseConvolutionLayer = std::make_unique<arm_compute::NEDepthwiseConvolutionLayer>();
    static_cast<arm_compute::NEDepthwiseConvolutionLayer*>(
        m_pDepthwiseConvolutionLayer.get())->configure(&input,
                                                       &weights,
                                                       biasesPtr,
                                                       &output,
                                                       padStrideInfo,
                                                       depthMultiplier,
                                                       activationInfo,
                                                       aclDilationInfo);

    // Add details for profiling output
    WorkloadInfo detailsInfo;

    detailsInfo.m_InputTensorInfos = info.m_InputTensorInfos;
    detailsInfo.m_OutputTensorInfos = info.m_OutputTensorInfos;

    // Report profiling details
    ARMNN_REPORT_PROFILING_WORKLOAD_DESC("NeonDepthwiseConvolution2dWorkload_Construct",
                                         descriptor.m_Parameters,
                                         detailsInfo,
                                         GetGuid());

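    // prepare() lets ACL perform one-off setup (for example, transforming the constant weights) at
    // construction time, so Execute() below only has to call run().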
    m_pDepthwiseConvolutionLayer->prepare();
}

void NeonDepthwiseConvolutionWorkload::Execute() const
{
    ARMNN_SCOPED_PROFILING_EVENT_NEON_NAME_GUID("NeonDepthwiseConvolutionWorkload_Execute");

    m_pDepthwiseConvolutionLayer->run();
}

} // namespace armnn