ArmNN 25.02
NeonConstantWorkload.cpp
//
// Copyright © 2017-2018,2020-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "NeonConstantWorkload.hpp"

#include <arm_compute/core/Types.h>
#include <BFloat16.hpp>
#include <Half.hpp>
#include <aclCommon/ArmComputeTensorUtils.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>
#include <armnn/backends/TensorHandle.hpp>
#include <neon/NeonTensorHandle.hpp>
#include "NeonBaseWorkload.hpp"

namespace armnn
{

arm_compute::Status NeonConstantWorkloadValidate(const TensorInfo& output)
{
    const arm_compute::TensorInfo neonOutputInfo = armcomputetensorutils::BuildArmComputeTensorInfo(output);

    std::array<arm_compute::DataType, 10> supportedTypes = {
        arm_compute::DataType::BFLOAT16,
        arm_compute::DataType::F16,
        arm_compute::DataType::F32,
        arm_compute::DataType::QASYMM8,
        arm_compute::DataType::QASYMM8_SIGNED,
        arm_compute::DataType::QSYMM16,
        arm_compute::DataType::QSYMM8,
        arm_compute::DataType::QSYMM8_PER_CHANNEL,
        arm_compute::DataType::S32,
        arm_compute::DataType::S64
    };
    auto it = std::find(begin(supportedTypes), end(supportedTypes), neonOutputInfo.data_type());

    if (it != end(supportedTypes))
    {
        return arm_compute::Status{};
    }
    else
    {
        return arm_compute::Status{arm_compute::ErrorCode::RUNTIME_ERROR, "Unsupported DataType"};
    }
}

NeonConstantWorkload::NeonConstantWorkload(const ConstantQueueDescriptor& descriptor,
                                           const WorkloadInfo& info)
    : NeonBaseWorkload<ConstantQueueDescriptor>(descriptor, info)
    , m_RanOnce(false)
{
}

void NeonConstantWorkload::Execute() const
{
    ARMNN_SCOPED_PROFILING_EVENT_NEON_NAME_GUID("NeonConstantWorkload_Execute");

    using namespace armcomputetensorutils;

    // The intermediate tensor held by the corresponding layer output handler can be initialised with the
    // given data on the first inference, then reused for subsequent inferences.
    // The initialisation cannot happen at workload construction time since the ACL kernel for the next layer
    // may not have been configured at that point.
    if (!m_RanOnce)
    {
        const ConstantQueueDescriptor& data = this->m_Data;

        ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(data.m_LayerOutput, "Output tensor handle is null.");
        arm_compute::ITensor& output =
            PolymorphicDowncast<NeonTensorHandle*>(data.m_Outputs[0])->GetTensor();
        arm_compute::DataType computeDataType =
            PolymorphicDowncast<NeonTensorHandle*>(data.m_Outputs[0])->GetDataType();

        switch (computeDataType)
        {
            case arm_compute::DataType::BFLOAT16:
            {
                CopyArmComputeITensorData(data.m_LayerOutput->GetConstTensor<BFloat16>(), output);
                break;
            }
            case arm_compute::DataType::F16:
            {
                CopyArmComputeITensorData(data.m_LayerOutput->GetConstTensor<Half>(), output);
                break;
            }
            case arm_compute::DataType::F32:
            {
                CopyArmComputeITensorData(data.m_LayerOutput->GetConstTensor<float>(), output);
                break;
            }
            case arm_compute::DataType::QASYMM8:
            {
                CopyArmComputeITensorData(data.m_LayerOutput->GetConstTensor<uint8_t>(), output);
                break;
            }
            case arm_compute::DataType::QASYMM8_SIGNED:
            {
                CopyArmComputeITensorData(data.m_LayerOutput->GetConstTensor<int8_t>(), output);
                break;
            }
            case arm_compute::DataType::QSYMM16:
            {
                CopyArmComputeITensorData(data.m_LayerOutput->GetConstTensor<int16_t>(), output);
                break;
            }
            case arm_compute::DataType::QSYMM8:
            case arm_compute::DataType::QSYMM8_PER_CHANNEL:
            {
                CopyArmComputeITensorData(data.m_LayerOutput->GetConstTensor<int8_t>(), output);
                break;
            }
            case arm_compute::DataType::S32:
            {
                CopyArmComputeITensorData(data.m_LayerOutput->GetConstTensor<int32_t>(), output);
                break;
            }
            case arm_compute::DataType::S64:
            {
                CopyArmComputeITensorData(data.m_LayerOutput->GetConstTensor<int64_t>(), output);
                break;
            }
            default:
            {
                throw InvalidArgumentException("Unknown data type.");
            }
        }

        m_RanOnce = true;
    }
}

} // namespace armnn
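
NeonConstantWorkloadValidate can also be called on its own to probe, ahead of time, whether the NEON backend can hold a constant of a given data type. Below is a minimal sketch of such a check; it assumes NeonConstantWorkload.hpp is visible on the include path, and the helper name, tensor shape and data type are illustrative only, not part of ArmNN:

#include "NeonConstantWorkload.hpp"   // assumed visible: declares NeonConstantWorkloadValidate

#include <armnn/Tensor.hpp>
#include <armnn/Types.hpp>

#include <iostream>

// Illustrative helper: ask the validator whether a constant with this
// TensorInfo could be handled by the NEON constant workload.
void CheckNeonConstantSupport()
{
    // Example output info: a 1x8 Float32 tensor (values chosen for illustration).
    armnn::TensorInfo outputInfo(armnn::TensorShape({1, 8}), armnn::DataType::Float32);

    arm_compute::Status status = armnn::NeonConstantWorkloadValidate(outputInfo);
    if (status.error_code() == arm_compute::ErrorCode::OK)
    {
        std::cout << "Constant data type is supported on the NEON backend\n";
    }
    else
    {
        std::cout << "Unsupported: " << status.error_description() << "\n";
    }
}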

Referenced symbols:

ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(_cond, _str): defined in Exceptions.hpp:210
ARMNN_SCOPED_PROFILING_EVENT_NEON_NAME_GUID(label): creates a profiling event that uses GetGuid() and GetName() from the calling class.
QueueDescriptor m_Data: defined in Workload.hpp:74
const T* GetConstTensor() const
NeonConstantWorkload(const ConstantQueueDescriptor& descriptor, const WorkloadInfo& info)
virtual void Execute() const override
namespace armnn: Copyright (c) 2021 ARM Limited and Contributors.
half_float::half Half: defined in Half.hpp:22
Status: enumeration, defined in Types.hpp:43
DataType: defined in Types.hpp:49
arm_compute::Status NeonConstantWorkloadValidate(const TensorInfo& output)
const ConstTensorHandle* m_LayerOutput
std::vector<ITensorHandle*> m_Outputs
WorkloadInfo: contains information about TensorInfos of a layer.
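
The m_RanOnce flag in Execute() is an instance of a run-once pattern: the constant data is copied into the output tensor on the first inference only, because the destination may not be fully configured until the neighbouring ACL kernels have been set up, and later inferences simply reuse the already-initialised tensor. A stripped-down sketch of the same idea follows; the class and member names are illustrative and not part of ArmNN:

#include <cstring>
#include <vector>

// Illustrative only: an object that copies its constant payload into a
// destination buffer on the first Execute() call and does nothing afterwards.
class RunOnceConstant
{
public:
    RunOnceConstant(std::vector<float> payload, float* destination)
        : m_Payload(std::move(payload))
        , m_Destination(destination)
        , m_RanOnce(false)
    {
    }

    // const to mirror the workload's Execute() const; the flag is mutable so
    // the one-off copy can still be recorded.
    void Execute() const
    {
        if (!m_RanOnce)
        {
            // The copy is deferred to the first call because the destination
            // may only be allocated/configured after construction.
            std::memcpy(m_Destination, m_Payload.data(), m_Payload.size() * sizeof(float));
            m_RanOnce = true;
        }
        // Subsequent calls: the destination already holds the data, so skip.
    }

private:
    std::vector<float> m_Payload;
    float*             m_Destination;
    mutable bool       m_RanOnce;
};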