ArmNN 24.08
RefFakeQuantizationFloat32Workload.cpp
//
// Copyright © 2017-2018,2020-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "RefFakeQuantizationFloat32Workload.hpp"

#include "RefWorkloadUtils.hpp"

#include "Profiling.hpp"

#include <armnn/utility/NumericCast.hpp>

namespace armnn
{

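// Quantizes each element of inputData to uint8_t using the scale and offset
// derived from the [min, max] range, then stores the quantized level back
// into outputData as a float.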
void FakeQuantization(const float* inputData, float* outputData, uint32_t numElements, float min, float max)
{
    float scale = (max - min) / 255.f;
    int32_t offset = armnn::numeric_cast<int32_t>((-min * 255.f) / (max - min));

    for (uint32_t i = 0; i < numElements; i++)
    {
        outputData[i] = static_cast<float>(armnn::Quantize<uint8_t>(inputData[i], scale, offset));
    }
}

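// Execute() is the synchronous entry point and uses the tensors bound to the
// queue descriptor; ExecuteAsync() instead resolves tensor handles from the
// per-invocation working memory it is handed.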
void RefFakeQuantizationFloat32Workload::Execute() const
{
    Execute(m_Data.m_Inputs, m_Data.m_Outputs);
}

void RefFakeQuantizationFloat32Workload::ExecuteAsync(ExecutionData& executionData)
{
    WorkingMemDescriptor* workingMemDescriptor = static_cast<WorkingMemDescriptor*>(executionData.m_Data);
    Execute(workingMemDescriptor->m_Inputs, workingMemDescriptor->m_Outputs);
}

void RefFakeQuantizationFloat32Workload::Execute(std::vector<ITensorHandle*> inputs,
                                                 std::vector<ITensorHandle*> outputs) const
{
    ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID("RefFakeQuantizationFloat32Workload_Execute");

    const TensorInfo& inputInfo = GetTensorInfo(inputs[0]);

    const float* inputData = reinterpret_cast<const float*>(inputs[0]->Map());
    float* outputData = reinterpret_cast<float*>(outputs[0]->Map());
    FakeQuantization(inputData, outputData, inputInfo.GetNumElements(),
                     m_Data.m_Parameters.m_Min,
                     m_Data.m_Parameters.m_Max);
}

} //namespace armnn
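
For reference, below is a minimal standalone sketch (not ArmNN code) of the per-element arithmetic performed above, assuming armnn::Quantize<uint8_t> rounds value/scale, adds the offset and clamps to the [0, 255] range of uint8_t; the range [-1, 1] is an arbitrary example. Note that the workload writes the raw quantized level back out as a float rather than dequantizing it into [min, max].

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <cstdio>

int main()
{
    const float min = -1.0f;                                  // example range
    const float max =  1.0f;
    const float scale = (max - min) / 255.f;                  // one uint8 step in float units
    const int32_t offset = static_cast<int32_t>((-min * 255.f) / (max - min)); // zero point

    const float x = 0.5f;
    // Mirrors the assumed armnn::Quantize<uint8_t> behaviour: round, add offset, clamp.
    int32_t q = static_cast<int32_t>(std::round(x / scale)) + offset;
    q = std::min(255, std::max(0, q));

    // The workload stores this quantized level back out as a float.
    std::printf("x = %f -> q = %d -> output %f\n", x, q, static_cast<float>(q)); // q = 191
    return 0;
}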