using namespace armcomputetensorutils;

const arm_compute::TensorInfo aclInputInfo0 = BuildArmComputeTensorInfo(input0);
const arm_compute::TensorInfo aclInputInfo1 = BuildArmComputeTensorInfo(input1);
const arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output);
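The three BuildArmComputeTensorInfo calls above convert ArmNN TensorInfos into ACL descriptors inside ClLogicalAndWorkloadValidate. A minimal sketch of how the function could then complete, assuming it delegates to arm_compute::CLLogicalAnd::validate (the ACL function matching the CLLogicalAnd layer configured below):

// Hand the converted descriptors to ACL's static validate; the returned
// arm_compute::Status reports whether the CL backend supports this
// combination of tensor shapes and data types.
return arm_compute::CLLogicalAnd::validate(&aclInputInfo0,
                                           &aclInputInfo1,
                                           &aclOutputInfo);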
ClLogicalAndWorkload::ClLogicalAndWorkload(const LogicalBinaryQueueDescriptor& descriptor,
                                           const WorkloadInfo& info,
                                           const arm_compute::CLCompileContext& clCompileContext)

arm_compute::ICLTensor& input0 = PolymorphicDowncast<ClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
arm_compute::ICLTensor& input1 = PolymorphicDowncast<ClTensorHandle*>(m_Data.m_Inputs[1])->GetTensor();
arm_compute::ICLTensor& output = PolymorphicDowncast<ClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();

m_LogicalAndLayer.configure(clCompileContext, &input0, &input1, &output);
m_LogicalAndLayer.run();
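The run() call above forms the body of Execute(); in ArmNN CL workloads it is typically preceded by the scoped profiling macro listed in the references below. A sketch under that assumption (the event label string is illustrative):

void ClLogicalAndWorkload::Execute() const
{
    // Scoped profiling event; GetGuid()/GetName() come from the workload class.
    // The label string here is an illustrative placeholder.
    ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClLogicalAndWorkload_Execute");
    m_LogicalAndLayer.run();   // launch the configured CLLogicalAnd kernels
}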
References:

ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID(label): creates a profiling event that uses GetGuid() and GetName() from the calling class.
ARMNN_REPORT_PROFILING_WORKLOAD_DESC(name, desc, infos, guid)
ClLogicalAndWorkload(const LogicalBinaryQueueDescriptor& descriptor, const WorkloadInfo& info, const arm_compute::CLCompileContext& clCompileContext)
virtual void Execute() const override
arm_compute::Status ClLogicalAndWorkloadValidate(const TensorInfo& input0, const TensorInfo& input1, const TensorInfo& output) (see the caller-side sketch after this list)
std::vector<ITensorHandle*> m_Inputs
std::vector<ITensorHandle*> m_Outputs
void ValidateInputsOutputs(const std::string& descName, unsigned int numExpectedIn, unsigned int numExpectedOut) const
LayerDescriptor m_Parameters
WorkloadInfo: contains information about TensorInfos of a layer.
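For context, a sketch of how a caller might combine the pieces referenced above: validate first, then construct the workload and execute it. The variable names (descriptor, info, clCompileContext) and the use of WorkloadInfo::m_InputTensorInfos / m_OutputTensorInfos are illustrative assumptions, not taken from this listing.

// Hypothetical caller-side flow (names are illustrative).
arm_compute::Status status = ClLogicalAndWorkloadValidate(info.m_InputTensorInfos[0],
                                                          info.m_InputTensorInfos[1],
                                                          info.m_OutputTensorInfos[0]);

if (status.error_code() == arm_compute::ErrorCode::OK)
{
    // The constructor downcasts the tensor handles and configures the
    // CLLogicalAnd layer against the given compile context.
    ClLogicalAndWorkload workload(descriptor, info, clCompileContext);
    workload.Execute();   // runs the configured CLLogicalAnd layer
}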