#include <arm_compute/runtime/NEON/functions/NECast.h>

static constexpr arm_compute::ConvertPolicy g_AclConvertPolicy = arm_compute::ConvertPolicy::SATURATE;
    // Validation always succeeds; the workload can fall back to a software conversion.
    return arm_compute::Status();
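For context, a minimal sketch of the surrounding NeonConvertFp32ToFp16WorkloadValidate function, assuming it simply ignores its arguments via IgnoreUnused (listed among the referenced symbols below) and returns a default arm_compute::Status; the exact body in the ArmNN source may differ:

// Sketch only: assumes validation ignores its arguments and always reports success.
arm_compute::Status NeonConvertFp32ToFp16WorkloadValidate(const TensorInfo& input, const TensorInfo& output)
{
    IgnoreUnused(input);
    IgnoreUnused(output);
    return arm_compute::Status(); // a default-constructed Status means OK
}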
    // Constructor: exactly one input and one output are expected.
    this->m_Data.ValidateInputsOutputs("NeonConvertFp32ToFp16Workload", 1, 1);
    if (arm_compute::NECast::validate(input.info(), output.info(), g_AclConvertPolicy))
    {
        // Use NECast when the Compute Library supports this conversion.
        m_Cast.reset(new arm_compute::NECast());
        m_Cast->configure(&input, &output, g_AclConvertPolicy);
    }
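    else
    {
        // Sketch (assumption): when NECast cannot be used, record the input/output
        // tensor-handle pairs via GatherTensorHandlePairs (documented below) so that
        // Execute() can perform the conversion in software.
        GatherTensorHandlePairs(descriptor, m_TensorHandlePairs);
    }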
        // Software conversion path in Execute():
        auto convertFunc = [](uint8_t* dst, const uint8_t* src, size_t size)
            {
                auto input  = reinterpret_cast<const float*>(src);
                auto output = reinterpret_cast<Half*>(dst);
                size_t numElements = size / 2; // size is in bytes; 2 bytes per FP16 element
                armnnUtils::FloatingPointConverter::ConvertFloat32To16(input, numElements, output);
            };
        for (const auto& pair : m_TensorHandlePairs)
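        {
            // Sketch (assumption): the loop body is not shown in the listing above.
            // Presumably each recorded source handle is copied into its paired
            // destination handle, converting FP32 -> FP16 element-wise with convertFunc,
            // using the CopyTensorContentsGeneric helper documented below.
            CopyTensorContentsGeneric(pair.first, pair.second, convertFunc);
        }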
    // From ReplaceInputTensorHandle: swap in the new handle, then restore the backup.
    this->m_Data.m_Inputs[slot] = tensorHandle;
    this->m_Data.m_Inputs[slot] = backupHandle;

    // From ReplaceOutputTensorHandle: the listing shows the same m_Inputs slot being swapped and restored.
    this->m_Data.m_Inputs[slot] = tensorHandle;
    this->m_Data.m_Inputs[slot] = backupHandle;
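A sketch of the replace-and-revert pattern these fragments appear to come from, assuming Reconfigure() is called after swapping the handle and the backup is restored if it throws; the exception type is an assumption:

// Sketch only; exact control flow and exception type are assumptions.
void NeonConvertFp32ToFp16Workload::ReplaceInputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot)
{
    ITensorHandle* backupHandle = this->m_Data.m_Inputs[slot];
    this->m_Data.m_Inputs[slot] = tensorHandle;
    try
    {
        Reconfigure();
    }
    catch (armnn::UnimplementedException&)
    {
        // Reconfiguration is not supported: put the original handle back and rethrow.
        this->m_Data.m_Inputs[slot] = backupHandle;
        throw;
    }
}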
void NeonConvertFp32ToFp16Workload::Reconfigure()
#define ARMNN_SCOPED_PROFILING_EVENT_NEON_NAME_GUID(label)
Creates a profiling event that uses GetGuid() and GetName() from the calling class.
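As an illustration, a macro like this is typically placed at the top of a workload's Execute(); a usage sketch in which the label string is an assumption:

void NeonConvertFp32ToFp16Workload::Execute() const
{
    // Scoped profiling event covering the whole Execute() call.
    ARMNN_SCOPED_PROFILING_EVENT_NEON_NAME_GUID("NeonConvertFp32ToFp16Workload_Execute");
    // ... conversion work shown in the listing above ...
}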
NeonConvertFp32ToFp16Workload(const ConvertFp32ToFp16QueueDescriptor &descriptor, const WorkloadInfo &info)
void ReplaceInputTensorHandle(ITensorHandle *tensorHandle, unsigned int slot) override
void ReplaceOutputTensorHandle(ITensorHandle *tensorHandle, unsigned int slot) override
virtual void Execute() const override
static void ConvertFloat32To16(const float *srcFloat32Buffer, size_t numElements, void *dstFloat16Buffer)
Converts a buffer of FP32 values to FP16 and stores the result in the given dstFloat16Buffer.
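A small usage sketch of this converter, assuming it lives in armnnUtils::FloatingPointConverter and that armnn::Half is the 2-byte FP16 storage type used in the lambda above:

#include <vector>

// Sketch: convert four FP32 values into an FP16 buffer.
std::vector<float> src = { 0.0f, 1.0f, -2.5f, 65504.0f };
std::vector<armnn::Half> dst(src.size());
armnnUtils::FloatingPointConverter::ConvertFloat32To16(src.data(), src.size(), dst.data());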
arm_compute::Status NeonConvertFp32ToFp16WorkloadValidate(const TensorInfo &input, const TensorInfo &output)
void GatherTensorHandlePairs(const DescriptorType &descriptor, std::vector< std::pair< SrcTensorHandleType *, DstTensorHandleType * > > &tensorHandlePairs)
DestType PolymorphicDowncast(SourceType *value)
Polymorphic downcast for built-in pointers only.
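For illustration, this is how a workload typically obtains the backing arm_compute tensor from an ITensorHandle; the IAclTensorHandle type and its GetTensor() call are assumptions based on how the Neon backend is usually written:

// Sketch (assumptions: IAclTensorHandle and GetTensor() as in the Neon backend).
arm_compute::ITensor& input =
    PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
arm_compute::ITensor& output =
    PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();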
MultiTypedWorkload< QueueDescriptor, armnn::DataType::Float32, armnn::DataType::Float16 > Float32ToFloat16Workload
void CopyTensorContentsGeneric(const ITensorHandle *srcTensor, ITensorHandle *dstTensor, CopyFunc copy)
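A minimal usage sketch of this helper: copying one tensor handle into another with a plain byte copy instead of the converting lambda; srcHandle and dstHandle are hypothetical placeholders:

#include <cstring>

// Sketch: srcHandle/dstHandle are hypothetical ITensorHandle* of matching size and layout.
auto rawCopy = [](uint8_t* dst, const uint8_t* src, size_t size)
{
    std::memcpy(dst, src, size);
};
CopyTensorContentsGeneric(srcHandle, dstHandle, rawCopy);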
void IgnoreUnused(Ts &&...)
WorkloadInfo: contains information about the TensorInfos of a layer.
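To tie the pieces together, a hedged end-to-end sketch of validating this conversion and preparing its WorkloadInfo; the tensor shape and the m_InputTensorInfos/m_OutputTensorInfos member names are assumptions for illustration:

// Sketch only; member names on WorkloadInfo and the tensor shape are illustrative assumptions.
armnn::TensorInfo inputInfo({ 1, 16, 16, 3 }, armnn::DataType::Float32);
armnn::TensorInfo outputInfo({ 1, 16, 16, 3 }, armnn::DataType::Float16);

// Validation always reports success for this workload (see NeonConvertFp32ToFp16WorkloadValidate above).
arm_compute::Status status = NeonConvertFp32ToFp16WorkloadValidate(inputInfo, outputInfo);

armnn::WorkloadInfo info;
info.m_InputTensorInfos  = { inputInfo };
info.m_OutputTensorInfos = { outputInfo };

// A ConvertFp32ToFp16QueueDescriptor with exactly one input and one output ITensorHandle*
// (not shown here) would then be passed to the NeonConvertFp32ToFp16Workload constructor,
// which calls ValidateInputsOutputs("NeonConvertFp32ToFp16Workload", 1, 1) as shown above.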