ArmNN 25.11
ClConvertFp16ToFp32Workload.hpp
Go to the documentation of this file.
//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include <armnn/backends/Workload.hpp>

#include <arm_compute/runtime/CL/functions/CLDepthConvertLayer.h>

#include <cl/ICLTensorProxy.hpp>

namespace armnn
{

class ClConvertFp16ToFp32Workload : public Float16ToFloat32Workload<ConvertFp16ToFp32QueueDescriptor>
{
public:

    ClConvertFp16ToFp32Workload(const ConvertFp16ToFp32QueueDescriptor& descriptor,
                                const WorkloadInfo& info,
                                const arm_compute::CLCompileContext& clCompileContext);
    virtual void Execute() const override;

    bool SupportsTensorHandleReplacement() const override { return true; }

    // Replace input tensor handle with the given TensorHandle
    void ReplaceInputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot) override;

    // Replace output tensor handle with the given TensorHandle
    void ReplaceOutputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot) override;

private:
    // Underlying Compute Library function that performs the FP16 -> FP32 conversion
    mutable arm_compute::CLDepthConvertLayer m_Layer;

    // Re-points the tensor proxies at the new CL tensors after a handle has been replaced
    virtual void Reconfigure();

    // Proxies that let the underlying CL tensors be swapped without recreating the workload
    std::unique_ptr<ICLTensorProxy> m_InputProxy;
    std::unique_ptr<ICLTensorProxy> m_OutputProxy;
};

arm_compute::Status ClConvertFp16ToFp32WorkloadValidate(const TensorInfo& input, const TensorInfo& output);

} // namespace armnn
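
For context, a minimal usage sketch (not part of the header above): it shows how ClConvertFp16ToFp32WorkloadValidate might be called to check whether the Compute Library can run a given FP16 to FP32 conversion before the CL backend creates the workload. The tensor shape, the include path for the header, and the surrounding error handling are illustrative assumptions, not code taken from Arm NN.

#include "ClConvertFp16ToFp32Workload.hpp" // assumed include path; depends on build setup

#include <armnn/Tensor.hpp>

#include <iostream>

int main()
{
    // Same shape for input and output; only the data type changes (FP16 -> FP32).
    const armnn::TensorShape shape({1, 16, 16, 3});
    const armnn::TensorInfo inputInfo(shape, armnn::DataType::Float16);
    const armnn::TensorInfo outputInfo(shape, armnn::DataType::Float32);

    // Ask the Compute Library whether this conversion is supported on the CL backend.
    const arm_compute::Status status = armnn::ClConvertFp16ToFp32WorkloadValidate(inputInfo, outputInfo);

    if (status.error_code() != arm_compute::ErrorCode::OK)
    {
        std::cerr << "Conversion not supported: " << status.error_description() << std::endl;
        return 1;
    }
    return 0;
}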