using namespace armcomputetensorutils;

static constexpr arm_compute::ConvertPolicy g_AclConvertPolicy = arm_compute::ConvertPolicy::SATURATE;

ClConvertFp16ToFp32Workload::ClConvertFp16ToFp32Workload(const ConvertFp16ToFp32QueueDescriptor& descriptor,
    const WorkloadInfo& info, const arm_compute::CLCompileContext& clCompileContext)
{
    // Wrap the workload's input and output ICLTensors in proxies so the underlying tensor handles can be
    // swapped later (see the sketch below), then configure the conversion layer against the proxies.
    m_InputProxy  = std::make_unique<ICLTensorProxy>(&input);
    m_OutputProxy = std::make_unique<ICLTensorProxy>(&output);
    m_Layer.configure(clCompileContext, m_InputProxy.get(), m_OutputProxy.get(), g_AclConvertPolicy, 0);
}
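The constructor body above assumes that input and output already refer to the workload's underlying arm_compute::ICLTensor objects. A minimal sketch of the step that typically precedes the proxy creation, assuming the CL backend's IClTensorHandle interface exposes GetTensor() and that m_Data holds the queue descriptor's handle lists (as suggested by the m_Inputs/m_Outputs and ValidateInputsOutputs entries later in this page); this is an illustrative sketch, not the file's exact code:

    // Sketch only: check the descriptor has exactly one input and one output, then fetch the ICLTensors
    // behind the first input and output tensor handles.
    m_Data.ValidateInputsOutputs("ClConvertFp16ToFp32Workload", 1, 1);
    arm_compute::ICLTensor& input  = static_cast<IClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
    arm_compute::ICLTensor& output = static_cast<IClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();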
arm_compute::Status ClConvertFp16ToFp32WorkloadValidate(const TensorInfo& input, const TensorInfo& output)
{
    if (input.GetDataType() != DataType::Float16)
    {
        return arm_compute::Status(arm_compute::ErrorCode::RUNTIME_ERROR, "Input should be Float16");
    }
    if (output.GetDataType() != DataType::Float32)
    {
        return arm_compute::Status(arm_compute::ErrorCode::RUNTIME_ERROR, "Output should be Float32");
    }
    const arm_compute::TensorInfo aclInputInfo  = BuildArmComputeTensorInfo(input);
    const arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output);
    return arm_compute::CLDepthConvertLayer::validate(&aclInputInfo, &aclOutputInfo, g_AclConvertPolicy, 0);
}
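For illustration, a caller can use this validation routine before creating the workload. A minimal usage sketch, assuming the relevant armnn (Tensor.hpp) and arm_compute (Error.h) headers are included; the {2, 3} tensor shape is made up for the example:

    // Hypothetical usage: check whether an Fp16 -> Fp32 conversion of a {2, 3} tensor is supported.
    armnn::TensorInfo inputInfo(armnn::TensorShape({2, 3}), armnn::DataType::Float16);
    armnn::TensorInfo outputInfo(armnn::TensorShape({2, 3}), armnn::DataType::Float32);

    arm_compute::Status status = armnn::ClConvertFp16ToFp32WorkloadValidate(inputInfo, outputInfo);
    if (status.error_code() != arm_compute::ErrorCode::OK)
    {
        std::cerr << "Conversion not supported: " << status.error_description() << std::endl;
    }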
void ClConvertFp16ToFp32Workload::Reconfigure()
{
    // Repoint the proxies at the ICLTensors behind the current m_Data handles (fetched as in the
    // constructor sketch above); the configured CL kernel itself is left untouched.
    m_InputProxy->set(&input);
    m_OutputProxy->set(&output);
}
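Reconfigure() is what makes the tensor-handle replacement overrides listed below cheap: only the proxies are updated. A minimal sketch of how ReplaceInputTensorHandle() could drive it; the rollback behaviour and the catch-all exception handling are assumptions for illustration, not taken from the file:

    void ClConvertFp16ToFp32Workload::ReplaceInputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot)
    {
        // Sketch only: swap the handle into the requested slot, then let Reconfigure() repoint the proxy.
        ITensorHandle* backup = m_Data.m_Inputs[slot];
        m_Data.m_Inputs[slot] = tensorHandle;
        try
        {
            Reconfigure();
        }
        catch (...)
        {
            // Restore the previous handle if the workload cannot be reconfigured.
            m_Data.m_Inputs[slot] = backup;
            throw;
        }
    }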
#define ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID(label)
    Creates a profiling event that uses GetGuid() and GetName() from the calling class (see the Execute() usage sketch after this list).
ClConvertFp16ToFp32Workload(const ConvertFp16ToFp32QueueDescriptor &descriptor, const WorkloadInfo &info, const arm_compute::CLCompileContext &clCompileContext)
void ReplaceInputTensorHandle(ITensorHandle *tensorHandle, unsigned int slot) override
void ReplaceOutputTensorHandle(ITensorHandle *tensorHandle, unsigned int slot) override
virtual void Execute() const override
DataType GetDataType() const
arm_compute::Status ClConvertFp16ToFp32WorkloadValidate(const TensorInfo &input, const TensorInfo &output)
void RunClFunction(arm_compute::IFunction &function, const CheckLocation &location)
std::vector< ITensorHandle * > m_Inputs
std::vector< ITensorHandle * > m_Outputs
void ValidateInputsOutputs(const std::string &descName, unsigned int numExpectedIn, unsigned int numExpectedOut) const
WorkloadInfo: Contains information about TensorInfos of a layer.
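Tying several of the referenced symbols together, a minimal sketch of what this workload's Execute() override can look like, assuming the profiling macro and RunClFunction() are used as in other CL workloads; the event label string is illustrative:

    void ClConvertFp16ToFp32Workload::Execute() const
    {
        // Emit a scoped profiling event tagged with this workload's name and GUID.
        ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClConvertFp16ToFp32Workload_Execute");
        // Run the configured CL conversion function; RunClFunction reports failures using the
        // call site supplied by CHECK_LOCATION().
        RunClFunction(m_Layer, CHECK_LOCATION());
    }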