ArmNN 26.01
NeonBatchMatMulWorkload Class Reference

#include <NeonBatchMatMulWorkload.hpp>

Inheritance diagram for NeonBatchMatMulWorkload: [diagram]
Collaboration diagram for NeonBatchMatMulWorkload: [diagram]

Public Member Functions

 NeonBatchMatMulWorkload (const BatchMatMulQueueDescriptor &descriptor, const WorkloadInfo &info, const bool isFastMathEnabled)
 
virtual void Execute () const override
 
- Public Member Functions inherited from NeonBaseWorkload< BatchMatMulQueueDescriptor >
 NeonBaseWorkload (const BatchMatMulQueueDescriptor &descriptor, const WorkloadInfo &info)
 
void ReplaceInputTensorHandle (ITensorHandle *tensorHandle, unsigned int slot) override
 
void ReplaceOutputTensorHandle (ITensorHandle *tensorHandle, unsigned int slot) override
 
- Public Member Functions inherited from BaseWorkload< QueueDescriptor >
 BaseWorkload (const QueueDescriptor &descriptor, const WorkloadInfo &info)
 
virtual const std::string & GetName () const override
 
void PostAllocationConfigure () override
 
const QueueDescriptor & GetData () const
 
arm::pipe::ProfilingGuid GetGuid () const final
 
virtual bool SupportsTensorHandleReplacement () const override
 
- Public Member Functions inherited from IWorkload
virtual ~IWorkload ()
 
virtual void RegisterDebugCallback (const DebugCallbackFunction &)
 
virtual armnn::Optional< armnn::MemoryRequirements > GetMemoryRequirements ()
 

Additional Inherited Members

- Protected Member Functions inherited from NeonBaseWorkload< BatchMatMulQueueDescriptor >
virtual void Reconfigure ()
 
- Protected Attributes inherited from BaseWorkload< QueueDescriptor >
QueueDescriptor m_Data
 
const arm::pipe::ProfilingGuid m_Guid
 
const std::string m_Name
 

Detailed Description

Definition at line 22 of file NeonBatchMatMulWorkload.hpp.
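
This workload wraps an Arm Compute Library batch matrix-multiply function (held in m_MatMulLayer, presumably arm_compute::NEMatMul given the CpuMatMulSettings used below) to run on the Neon (CpuAcc) backend. Workloads are normally instantiated by the backend rather than by user code; the following is a minimal sketch, assumed rather than taken from this page, of building a batch MatMul through the ArmNN graph API so that this workload is created when the network is optimised for CpuAcc. Shapes and layer names are illustrative.

// Sketch of the ArmNN graph API path to this workload (assumed usage).
#include <armnn/ArmNN.hpp>

int main()
{
    using namespace armnn;

    BatchMatMulDescriptor desc;   // defaults: no transpose, no adjoint
    desc.m_TransposeY = true;     // becomes MatMulInfo::adj_rhs in this workload's constructor

    INetworkPtr net = INetwork::Create();
    IConnectableLayer* inX = net->AddInputLayer(0, "X");
    IConnectableLayer* inY = net->AddInputLayer(1, "Y");
    IConnectableLayer* bmm = net->AddBatchMatMulLayer(desc, "BatchMatMul");
    IConnectableLayer* out = net->AddOutputLayer(0, "Out");

    inX->GetOutputSlot(0).Connect(bmm->GetInputSlot(0));
    inY->GetOutputSlot(0).Connect(bmm->GetInputSlot(1));
    bmm->GetOutputSlot(0).Connect(out->GetInputSlot(0));

    // X: [2, 4, 8], Y: [2, 6, 8] (transposed), Out: [2, 4, 6]
    inX->GetOutputSlot(0).SetTensorInfo(TensorInfo({2, 4, 8}, DataType::Float32));
    inY->GetOutputSlot(0).SetTensorInfo(TensorInfo({2, 6, 8}, DataType::Float32));
    bmm->GetOutputSlot(0).SetTensorInfo(TensorInfo({2, 4, 6}, DataType::Float32));

    IRuntime::CreationOptions options;
    IRuntimePtr runtime = IRuntime::Create(options);
    IOptimizedNetworkPtr optNet = Optimize(*net, {Compute::CpuAcc}, runtime->GetDeviceSpec());

    NetworkId netId = 0;
    runtime->LoadNetwork(netId, std::move(optNet));
    // ... bind input/output buffers and call runtime->EnqueueWorkload(netId, ins, outs)
    return 0;
}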

Constructor & Destructor Documentation

◆ NeonBatchMatMulWorkload()

NeonBatchMatMulWorkload ( const BatchMatMulQueueDescriptor & descriptor,
const WorkloadInfo & info,
const bool isFastMathEnabled )

Definition at line 57 of file NeonBatchMatMulWorkload.cpp.

    : NeonBaseWorkload<BatchMatMulQueueDescriptor>(descriptor, info)
{
    if (descriptor.m_Parameters.m_AdjointX || descriptor.m_Parameters.m_AdjointY)
    {
        throw Exception("Support for adjoint not implemented.");
    }
    if (descriptor.m_Parameters.m_DataLayoutX != armnn::DataLayout::NCHW
        || descriptor.m_Parameters.m_DataLayoutY != armnn::DataLayout::NCHW)
    {
        throw Exception("Only supported the MatMul in the last 2 dimensions");
    }

    m_Data.ValidateInputsOutputs("NeonBatchMatMulWorkload", 2, 1);

    arm_compute::ITensor& inputX = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
    arm_compute::ITensor& inputY = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[1])->GetTensor();
    arm_compute::ITensor& output = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();

    // The GeMM kernel that gets dispatched handles dynamic inputs differently to static ones,
    // so this flag needs to be set.
    inputX.info()->set_are_values_constant(false);
    inputY.info()->set_are_values_constant(false);

    const arm_compute::ActivationLayerInfo activationInfo = ConvertAdditionalInfoToAclActivationLayerInfo(descriptor);

    arm_compute::MatMulInfo matMulInfo;
    matMulInfo.adj_lhs(descriptor.m_Parameters.m_TransposeX);
    matMulInfo.adj_rhs(descriptor.m_Parameters.m_TransposeY);

    arm_compute::CpuMatMulSettings settings;
    settings.fast_math(isFastMathEnabled);

    m_MatMulLayer.configure(&inputX, &inputY, &output, matMulInfo, settings, activationInfo);

    // Report profiling details.
    WorkloadInfo detailsInfo;
    detailsInfo.m_InputTensorInfos = info.m_InputTensorInfos;
    detailsInfo.m_OutputTensorInfos = info.m_OutputTensorInfos;
    ARMNN_REPORT_PROFILING_WORKLOAD_DESC("NeonBatchMatMulWorkload_Construct",
                                         descriptor.m_Parameters,
                                         detailsInfo,
                                         GetGuid());
}

References ARMNN_REPORT_PROFILING_WORKLOAD_DESC, armnn::ConvertAdditionalInfoToAclActivationLayerInfo(), BaseWorkload< QueueDescriptor >::GetGuid(), armnn::info, BatchMatMulDescriptor::m_AdjointX, BatchMatMulDescriptor::m_AdjointY, BaseWorkload< QueueDescriptor >::m_Data, BatchMatMulDescriptor::m_DataLayoutX, BatchMatMulDescriptor::m_DataLayoutY, QueueDescriptor::m_Inputs, WorkloadInfo::m_InputTensorInfos, QueueDescriptor::m_Outputs, WorkloadInfo::m_OutputTensorInfos, QueueDescriptorWithParameters< LayerDescriptor >::m_Parameters, BatchMatMulDescriptor::m_TransposeX, BatchMatMulDescriptor::m_TransposeY, armnn::NCHW, and QueueDescriptor::ValidateInputsOutputs().
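
Note that the descriptor's m_TransposeX/m_TransposeY flags are mapped onto MatMulInfo's adj_lhs/adj_rhs setters, while true adjoint support is rejected with an exception. For reference, a standalone sketch of the underlying Arm Compute Library calls the constructor wraps; the shapes, fast-math choice, and include paths are assumptions and may vary between ACL versions.

// Assumed standalone NEMatMul usage mirroring this workload's configuration.
#include "arm_compute/runtime/NEON/functions/NEMatMul.h"
#include "arm_compute/runtime/Tensor.h"
#include "arm_compute/core/Types.h"

using namespace arm_compute;

int main()
{
    Tensor lhs, rhs, dst;

    // ACL TensorShape lists the innermost dimension first: [K, M, Batch].
    lhs.allocator()->init(TensorInfo(TensorShape(8U, 4U, 2U), 1, DataType::F32)); // X:   2 x 4 x 8
    rhs.allocator()->init(TensorInfo(TensorShape(6U, 8U, 2U), 1, DataType::F32)); // Y:   2 x 8 x 6
    dst.allocator()->init(TensorInfo(TensorShape(6U, 4U, 2U), 1, DataType::F32)); // Out: 2 x 4 x 6

    // Mirrors the constructor: the transpose flags drive MatMulInfo's adj_* setters.
    MatMulInfo matMulInfo;
    matMulInfo.adj_lhs(false);
    matMulInfo.adj_rhs(false);

    CpuMatMulSettings settings;
    settings.fast_math(true); // corresponds to isFastMathEnabled

    NEMatMul matmul;
    matmul.configure(&lhs, &rhs, &dst, matMulInfo, settings, ActivationLayerInfo());

    lhs.allocator()->allocate();
    rhs.allocator()->allocate();
    dst.allocator()->allocate();

    matmul.run(); // what NeonBatchMatMulWorkload::Execute() ultimately invokes
    return 0;
}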

Member Function Documentation

◆ Execute()

virtual void Execute () const override

Implements IWorkload.

Definition at line 103 of file NeonBatchMatMulWorkload.cpp.

{
    ARMNN_SCOPED_PROFILING_EVENT_NEON_NAME_GUID("NeonBatchMatMulWorkload_Execute");
    m_MatMulLayer.run();
}
ARMNN_SCOPED_PROFILING_EVENT_NEON_NAME_GUID creates a profiling event that uses GetGuid() and GetName() from the calling class.

References ARMNN_SCOPED_PROFILING_EVENT_NEON_NAME_GUID.


The documentation for this class was generated from the following files:

NeonBatchMatMulWorkload.hpp
NeonBatchMatMulWorkload.cpp