ArmNN
 25.11
Loading...
Searching...
No Matches
NeonFullyConnectedWorkload Class Reference

#include <NeonFullyConnectedWorkload.hpp>

Inheritance diagram for NeonFullyConnectedWorkload:
[legend]
Collaboration diagram for NeonFullyConnectedWorkload:
[legend]

Public Member Functions

 NeonFullyConnectedWorkload (const FullyConnectedQueueDescriptor &descriptor, const WorkloadInfo &info, std::shared_ptr< arm_compute::MemoryManagerOnDemand > &memoryManager)
virtual void Execute () const override
Public Member Functions inherited from NeonBaseWorkload< FullyConnectedQueueDescriptor >
 NeonBaseWorkload (const FullyConnectedQueueDescriptor &descriptor, const WorkloadInfo &info)
void ReplaceInputTensorHandle (ITensorHandle *tensorHandle, unsigned int slot) override
void ReplaceOutputTensorHandle (ITensorHandle *tensorHandle, unsigned int slot) override
Public Member Functions inherited from BaseWorkload< FullyConnectedQueueDescriptor >
 BaseWorkload (const FullyConnectedQueueDescriptor &descriptor, const WorkloadInfo &info)
virtual const std::string & GetName () const override
void PostAllocationConfigure () override
const FullyConnectedQueueDescriptor & GetData () const
arm::pipe::ProfilingGuid GetGuid () const final
virtual bool SupportsTensorHandleReplacement () const override
Public Member Functions inherited from IWorkload
virtual ~IWorkload ()
virtual void RegisterDebugCallback (const DebugCallbackFunction &)
virtual armnn::Optional< armnn::MemoryRequirements > GetMemoryRequirements ()

Additional Inherited Members

Protected Member Functions inherited from NeonBaseWorkload< FullyConnectedQueueDescriptor >
virtual void Reconfigure ()
Protected Attributes inherited from BaseWorkload< FullyConnectedQueueDescriptor >
FullyConnectedQueueDescriptor m_Data
const arm::pipe::ProfilingGuid m_Guid
const std::string m_Name

Detailed Description

Definition at line 27 of file NeonFullyConnectedWorkload.hpp.

Constructor & Destructor Documentation

◆ NeonFullyConnectedWorkload()

NeonFullyConnectedWorkload ( const FullyConnectedQueueDescriptor & descriptor,
const WorkloadInfo & info,
std::shared_ptr< arm_compute::MemoryManagerOnDemand > & memoryManager )

Definition at line 57 of file NeonFullyConnectedWorkload.cpp.

60 : NeonBaseWorkload<FullyConnectedQueueDescriptor>(descriptor, info)
61{
62 m_Data.ValidateInputsOutputs("NeonFullyConnectedWorkload", descriptor.m_Parameters.GetNumInputs(), 1);
63
64 arm_compute::ITensor& input = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
65 arm_compute::ITensor& weights = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[1])->GetTensor();
66 m_WeightsTensorInfo = info.m_InputTensorInfos[1];
67 weights.info()->set_are_values_constant(m_WeightsTensorInfo.IsConstant());
68 arm_compute::ITensor& output = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
69 if (m_WeightsTensorInfo.IsConstant())
70 {
71 // Copy the weights' tensor into arm_compute tensor.
72 m_WeightsTensor = std::make_unique<arm_compute::Tensor>();
73 BuildArmComputeTensor(*m_WeightsTensor, m_WeightsTensorInfo);
74 m_WeightsTensor->info()->set_are_values_constant(m_WeightsTensorInfo.IsConstant());
75 }
76
77 if (m_Data.m_Parameters.m_BiasEnabled)
78 {
79 // Copy the biases tensor into arm_compute tensor.
80 m_BiasesTensor = std::make_unique<arm_compute::Tensor>();
81 m_BiasesTensorInfo = info.m_InputTensorInfos[2];
82 BuildArmComputeTensor(*m_BiasesTensor, m_BiasesTensorInfo);
83 m_BiasesTensor->info()->set_are_values_constant(m_BiasesTensorInfo.IsConstant());
84 }
85
86 const arm_compute::ActivationLayerInfo activationInfo = ConvertAdditionalInfoToAclActivationLayerInfo(descriptor);
87 arm_compute::FullyConnectedLayerInfo fc_info =
88 ConvertFullyConnectedDescriptorToAclFullyConnectedLayerInfo(descriptor.m_Parameters, activationInfo);
89
90 auto layer = std::make_unique<arm_compute::NEFullyConnectedLayer>(memoryManager);
91 if (m_WeightsTensorInfo.IsConstant())
92 {
93 layer->configure(&input, m_WeightsTensor.get(), m_BiasesTensor.get(), &output, fc_info);
94 }
95 else
96 {
97 layer->configure(&input, &weights, m_BiasesTensor.get(), &output, fc_info);
98 }
99 m_FullyConnectedLayer.reset(layer.release());
100
101 // Add details for profiling output
102 WorkloadInfo detailsInfo;
103
104 detailsInfo.m_InputTensorInfos = info.m_InputTensorInfos;
105 detailsInfo.m_OutputTensorInfos = info.m_OutputTensorInfos;
106
107 // Report Profiling Details
108 ARMNN_REPORT_PROFILING_WORKLOAD_DESC("NeonFullyConnectedWorkload_Construct",
109 descriptor.m_Parameters,
110 detailsInfo,
111 this->GetGuid());
112}
#define ARMNN_REPORT_PROFILING_WORKLOAD_DESC(name, desc, infos, guid)
arm_compute::ActivationLayerInfo ConvertAdditionalInfoToAclActivationLayerInfo(const QueueDescriptor &queueDescriptor)
arm_compute::FullyConnectedLayerInfo ConvertFullyConnectedDescriptorToAclFullyConnectedLayerInfo(const FullyConnectedDescriptor &fullyConnectedDesc, const ActivationDescriptor *activationDesc)

References ARMNN_REPORT_PROFILING_WORKLOAD_DESC, armnn::ConvertAdditionalInfoToAclActivationLayerInfo(), armnn::ConvertFullyConnectedDescriptorToAclFullyConnectedLayerInfo(), FullyConnectedDescriptor::GetNumInputs(), armnn::info, BaseWorkload< FullyConnectedQueueDescriptor >::m_Data, WorkloadInfo::m_InputTensorInfos, WorkloadInfo::m_OutputTensorInfos, QueueDescriptorWithParameters< LayerDescriptor >::m_Parameters, NeonBaseWorkload< FullyConnectedQueueDescriptor >::NeonBaseWorkload(), and armnn::PolymorphicDowncast().

Member Function Documentation

◆ Execute()

void Execute ( ) const
overridevirtual

Implements IWorkload.

Definition at line 114 of file NeonFullyConnectedWorkload.cpp.

115{
116 ARMNN_SCOPED_PROFILING_EVENT_NEON_NAME_GUID("NeonFullyConnectedWorkload_Execute");
117 // The constant tensors may not be fully in place until the workload is Executed
118 if (!prepared)
119 {
120 if (m_WeightsTensorInfo.IsConstant())
121 {
122 InitializeArmComputeTensorData(*m_WeightsTensor, m_WeightsTensorInfo, m_Data.m_Inputs[1]);
123 m_WeightsTensor->info()->set_are_values_constant(m_WeightsTensorInfo.IsConstant());
124 }
125
126 if (m_Data.m_Parameters.m_BiasEnabled)
127 {
128 InitializeArmComputeTensorData(*m_BiasesTensor, m_BiasesTensorInfo, m_Data.m_Inputs[2]);
129 m_BiasesTensor->info()->set_are_values_constant(m_BiasesTensorInfo.IsConstant());
130 }
131 if (m_WeightsTensorInfo.IsConstant())
132 {
133 FreeTensorIfUnused(m_WeightsTensor);
134 }
135 if (m_BiasesTensorInfo.IsConstant())
136 {
137 FreeTensorIfUnused(m_BiasesTensor);
138 }
139 prepared = true;
140 }
141 m_FullyConnectedLayer->run();
142}
#define ARMNN_SCOPED_PROFILING_EVENT_NEON_NAME_GUID(label)
Creates a profiling event that uses GetGuid() and GetName() from the calling class.
void InitializeArmComputeTensorData(arm_compute::Tensor &tensor, TensorInfo tensorInfo, const ITensorHandle *handle)

References ARMNN_SCOPED_PROFILING_EVENT_NEON_NAME_GUID, armnn::InitializeArmComputeTensorData(), and BaseWorkload< FullyConnectedQueueDescriptor >::m_Data.


The documentation for this class was generated from the following files: