//
// Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
// NeonFullyConnectedWorkload.cpp (ArmNN 25.11)
#include "NeonFullyConnectedWorkload.hpp"

#include "NeonWorkloadUtils.hpp"

#include <aclCommon/ArmComputeTensorUtils.hpp>
#include <aclCommon/ArmComputeUtils.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>
#include <armnn/backends/TensorHandle.hpp>

#include <arm_compute/runtime/NEON/functions/NEFullyConnectedLayer.h>
namespace armnn
{
using namespace armcomputetensorutils;
using ACLMemManagerOnDemand = std::shared_ptr<arm_compute::MemoryManagerOnDemand>;

24arm_compute::Status NeonFullyConnectedWorkloadValidate(const TensorInfo& input,
25 const TensorInfo& output,
26 const TensorInfo& weights,
27 const Optional<TensorInfo>& biases,
28 const FullyConnectedDescriptor& descriptor,
29 const ActivationDescriptor* activationDescriptor)
30{
31 const arm_compute::TensorInfo aclInput = BuildArmComputeTensorInfo(input);
32 const arm_compute::TensorInfo aclOutput = BuildArmComputeTensorInfo(output);
33 arm_compute::TensorInfo aclWeights = BuildArmComputeTensorInfo(weights);
34 aclWeights.set_are_values_constant(weights.IsConstant());
35
36 arm_compute::TensorInfo aclBiases;
37 arm_compute::TensorInfo* optionalAclBiases = nullptr;
38 if (descriptor.m_BiasEnabled)
39 {
41 biases.has_value(),
42 "NeonFullyConnectedWorkload: Bias was enabled in the descriptor but no value was supplied.");
43 aclBiases = BuildArmComputeTensorInfo(biases.value());
44 aclBiases.set_are_values_constant(biases.value().IsConstant());
45 optionalAclBiases = &aclBiases;
46 }
47
48 const arm_compute::FullyConnectedLayerInfo fullyConnectedLayerInfo =
50 return arm_compute::NEFullyConnectedLayer::validate(&aclInput,
51 &aclWeights,
52 optionalAclBiases,
53 &aclOutput,
54 fullyConnectedLayerInfo);
55}
58 const WorkloadInfo& info,
59 ACLMemManagerOnDemand& memoryManager)
61{
62 m_Data.ValidateInputsOutputs("NeonFullyConnectedWorkload", descriptor.m_Parameters.GetNumInputs(), 1);
63
64 arm_compute::ITensor& input = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
65 arm_compute::ITensor& weights = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[1])->GetTensor();
66 m_WeightsTensorInfo = info.m_InputTensorInfos[1];
67 weights.info()->set_are_values_constant(m_WeightsTensorInfo.IsConstant());
68 arm_compute::ITensor& output = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
69 if (m_WeightsTensorInfo.IsConstant())
70 {
71 // Copy the weights' tensor into arm_compute tensor.
72 m_WeightsTensor = std::make_unique<arm_compute::Tensor>();
73 BuildArmComputeTensor(*m_WeightsTensor, m_WeightsTensorInfo);
74 m_WeightsTensor->info()->set_are_values_constant(m_WeightsTensorInfo.IsConstant());
75 }
76
77 if (m_Data.m_Parameters.m_BiasEnabled)
78 {
79 // Copy the biases tensor into arm_compute tensor.
80 m_BiasesTensor = std::make_unique<arm_compute::Tensor>();
81 m_BiasesTensorInfo = info.m_InputTensorInfos[2];
82 BuildArmComputeTensor(*m_BiasesTensor, m_BiasesTensorInfo);
83 m_BiasesTensor->info()->set_are_values_constant(m_BiasesTensorInfo.IsConstant());
84 }
85
86 const arm_compute::ActivationLayerInfo activationInfo = ConvertAdditionalInfoToAclActivationLayerInfo(descriptor);
87 arm_compute::FullyConnectedLayerInfo fc_info =
89
90 auto layer = std::make_unique<arm_compute::NEFullyConnectedLayer>(memoryManager);
91 if (m_WeightsTensorInfo.IsConstant())
92 {
93 layer->configure(&input, m_WeightsTensor.get(), m_BiasesTensor.get(), &output, fc_info);
94 }
95 else
96 {
97 layer->configure(&input, &weights, m_BiasesTensor.get(), &output, fc_info);
98 }
99 m_FullyConnectedLayer.reset(layer.release());
100
101 // Add details for profiling output
102 WorkloadInfo detailsInfo;
103
104 detailsInfo.m_InputTensorInfos = info.m_InputTensorInfos;
105 detailsInfo.m_OutputTensorInfos = info.m_OutputTensorInfos;
106
107 // Report Profiling Details
108 ARMNN_REPORT_PROFILING_WORKLOAD_DESC("NeonFullyConnectedWorkload_Construct",
109 descriptor.m_Parameters,
110 detailsInfo,
111 this->GetGuid());
112}
115{
116 ARMNN_SCOPED_PROFILING_EVENT_NEON_NAME_GUID("NeonFullyConnectedWorkload_Execute");
117 // The constant tensors may not be fully in place until the workload is Executed
118 if (!prepared)
119 {
120 if (m_WeightsTensorInfo.IsConstant())
121 {
122 InitializeArmComputeTensorData(*m_WeightsTensor, m_WeightsTensorInfo, m_Data.m_Inputs[1]);
123 m_WeightsTensor->info()->set_are_values_constant(m_WeightsTensorInfo.IsConstant());
124 }
125
126 if (m_Data.m_Parameters.m_BiasEnabled)
127 {
128 InitializeArmComputeTensorData(*m_BiasesTensor, m_BiasesTensorInfo, m_Data.m_Inputs[2]);
129 m_BiasesTensor->info()->set_are_values_constant(m_BiasesTensorInfo.IsConstant());
130 }
131 if (m_WeightsTensorInfo.IsConstant())
132 {
133 FreeTensorIfUnused(m_WeightsTensor);
134 }
135 if (m_BiasesTensorInfo.IsConstant())
136 {
137 FreeTensorIfUnused(m_BiasesTensor);
138 }
139 prepared = true;
140 }
141 m_FullyConnectedLayer->run();
142}

} //namespace armnn
#define ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(_cond, _str)
#define ARMNN_SCOPED_PROFILING_EVENT_NEON_NAME_GUID(label)
Creates a profiling event that uses GetGuid() and GetName() from the calling class.
#define ARMNN_REPORT_PROFILING_WORKLOAD_DESC(name, desc, infos, guid)
NeonBaseWorkload(const FullyConnectedQueueDescriptor &descriptor, const WorkloadInfo &info)
NeonFullyConnectedWorkload(const FullyConnectedQueueDescriptor &descriptor, const WorkloadInfo &info, std::shared_ptr< arm_compute::MemoryManagerOnDemand > &memoryManager)
bool has_value() const noexcept
Definition Optional.hpp:53
bool IsConstant() const
Definition Tensor.cpp:513
Copyright (c) 2021 ARM Limited and Contributors.
arm_compute::ActivationLayerInfo ConvertAdditionalInfoToAclActivationLayerInfo(const QueueDescriptor &queueDescriptor)
DestType PolymorphicDowncast(SourceType *value)
Polymorphic downcast for build in pointers only.
void InitializeArmComputeTensorData(arm_compute::Tensor &tensor, TensorInfo tensorInfo, const ITensorHandle *handle)
arm_compute::FullyConnectedLayerInfo ConvertFullyConnectedDescriptorToAclFullyConnectedLayerInfo(const FullyConnectedDescriptor &fullyConnectedDesc, const ActivationDescriptor *activationDesc)
arm_compute::Status NeonFullyConnectedWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const TensorInfo &weights, const Optional< TensorInfo > &biases, const FullyConnectedDescriptor &descriptor, const ActivationDescriptor *activationDescriptor)
std::shared_ptr< arm_compute::MemoryManagerOnDemand > ACLMemManagerOnDemand
An ActivationDescriptor for the ActivationLayer.
A FullyConnectedDescriptor for the FullyConnectedLayer.
uint32_t GetNumInputs() const
Get the number of inputs.
bool m_BiasEnabled
Enable/disable bias.
Contains information about TensorInfos of a layer.
std::vector< TensorInfo > m_OutputTensorInfos
std::vector< TensorInfo > m_InputTensorInfos