ArmNN
 24.08
ClMinimumWorkload.cpp
Go to the documentation of this file.
1 //
2 // Copyright © 2019-2023 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
#include "ClMinimumWorkload.hpp"

#include "ClWorkloadUtils.hpp"

#include <aclCommon/ArmComputeTensorUtils.hpp>
#include <aclCommon/ArmComputeUtils.hpp>

#include <armnn/backends/TensorHandle.hpp>

#include <cl/ClLayerSupport.hpp>
#include <cl/ClTensorHandle.hpp>
19 namespace armnn
20 {
21 
22 using namespace armcomputetensorutils;
23 
25  const TensorInfo& input1,
26  const TensorInfo& output)
27 {
28  const arm_compute::TensorInfo aclInput0Info = BuildArmComputeTensorInfo(input0);
29  const arm_compute::TensorInfo aclInput1Info = BuildArmComputeTensorInfo(input1);
30  const arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output);
31 
32  const arm_compute::Status aclStatus = arm_compute::CLElementwiseMin::validate(&aclInput0Info,
33  &aclInput1Info,
34  &aclOutputInfo);
35 
36  return aclStatus;
37 }
38 
40  const WorkloadInfo& info,
41  const arm_compute::CLCompileContext& clCompileContext)
43 {
44  m_Data.ValidateInputsOutputs("ClMinimumWorkload", 2, 1);
45 
46  arm_compute::ICLTensor& input0 = static_cast<IClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
47  arm_compute::ICLTensor& input1 = static_cast<IClTensorHandle*>(m_Data.m_Inputs[1])->GetTensor();
48  arm_compute::ICLTensor& output = static_cast<IClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
49 
50  {
51  ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClMinimumWorkload_configure");
52  m_MinimumLayer.configure(clCompileContext, &input0, &input1, &output);
53  }
54 }
55 
57 {
58  ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClMinimumWorkload_Execute");
59  RunClFunction(m_MinimumLayer, CHECK_LOCATION());
60 }
61 
62 } //namespace armnn
armnn::ClMinimumWorkload::ClMinimumWorkload
ClMinimumWorkload(const MinimumQueueDescriptor &descriptor, const WorkloadInfo &info, const arm_compute::CLCompileContext &clCompileContext)
Definition: ClMinimumWorkload.cpp:39
armnn::QueueDescriptor::ValidateInputsOutputs
void ValidateInputsOutputs(const std::string &descName, unsigned int numExpectedIn, unsigned int numExpectedOut) const
Definition: WorkloadData.cpp:447
armnn::TensorInfo
Definition: Tensor.hpp:152
ClLayerSupport.hpp
CHECK_LOCATION
#define CHECK_LOCATION()
Definition: Exceptions.hpp:203
armnn::ClBaseWorkload
Definition: ClBaseWorkload.hpp:13
ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID
#define ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID(label)
Creates a profiling event that uses GetGuid() and GetName() from the calling class.
Definition: ClWorkloadUtils.hpp:36
armnn::WorkloadInfo
Contains information about TensorInfos of a layer.
Definition: WorkloadInfo.hpp:16
ClWorkloadUtils.hpp
ArmComputeUtils.hpp
armnn::BoostLogSeverityMapping::info
@ info
armnn::QueueDescriptor::m_Outputs
std::vector< ITensorHandle * > m_Outputs
Definition: WorkloadData.hpp:27
armnn::ClMinimumWorkload::Execute
void Execute() const override
Definition: ClMinimumWorkload.cpp:56
armnn::IClTensorHandle
Definition: IClTensorHandle.hpp:13
armnn::ClMinimumWorkloadValidate
arm_compute::Status ClMinimumWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output)
Definition: ClMinimumWorkload.cpp:24
TensorHandle.hpp
armnn::Status
Status
Definition: Types.hpp:42
ClMinimumWorkload.hpp
ClTensorHandle.hpp
armnn::BaseWorkload< MinimumQueueDescriptor >::m_Data
MinimumQueueDescriptor m_Data
Definition: Workload.hpp:89
armnn::RunClFunction
void RunClFunction(arm_compute::IFunction &function, const CheckLocation &location)
Definition: ClWorkloadUtils.hpp:167
armnn
Copyright (c) 2021 ARM Limited and Contributors.
Definition: 01_00_quick_start.dox:6
armnn::MinimumQueueDescriptor
Definition: WorkloadData.hpp:473
ArmComputeTensorUtils.hpp
armnn::QueueDescriptor::m_Inputs
std::vector< ITensorHandle * > m_Inputs
Definition: WorkloadData.hpp:26