ArmNN
 24.08
GeluOperator.cpp
//
// Copyright © 2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
//
// Copyright © 2020 The TensorFlow Authors. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//

#include "GeluOperator.hpp"
#include "TosaTableUtils.hpp"

#include <layers/ActivationLayer.hpp>

// This function is paraphrased from:
// tensorflow/compiler/mlir/tosa/transforms/legalize_tfl.cc, function ConvertTFLGeluOp
TosaSerializationBasicBlock* ConvertGeluToTosaOperator(const Layer* layer,
                                                       const std::vector<const TensorInfo*>& inputs,
                                                       const std::vector<const TensorInfo*>& outputs,
                                                       const ActivationDescriptor* desc)
{
    if (inputs.size() != 1)
    {
        throw armnn::Exception("ConvertGeluToTosaOperator: 1 input tensor required.");
    }

    if (outputs.size() != 1)
    {
        throw armnn::Exception("ConvertGeluToTosaOperator: 1 output tensor required.");
    }

    if (desc->m_Function != ActivationFunction::Gelu)
    {
        throw armnn::Exception("ConvertGeluToTosaOperator: ActivationDescriptor only supports function Gelu.");
    }

    std::string inputName  = std::string("input_");
    std::string outputName = std::string("output0_");
    std::string blockName  = std::string("Op_GELU_block_") + GetUniqueTosaMappingID();

    // If a layer is present then the block will be used for execution, so input and output names need to be
    // determined using the previous and following layers so the graph is connected correctly.
    // For validation this doesn't matter.
    if (layer != nullptr)
    {
        inputName  = GenerateUniqueInputName(layer->GetInputSlot(0));
        outputName = GenerateUniqueOutputName(*layer);
    }

    std::vector<TosaSerializationTensor*> tensors;
    std::vector<TosaSerializationOperator*> operators;

    float input_scale   = inputs[0]->GetQuantizationScale();
    float output_scale  = outputs[0]->GetQuantizationScale();
    int32_t input_zp    = inputs[0]->GetQuantizationOffset();
    int32_t output_zp   = outputs[0]->GetQuantizationOffset();
    DataType inputDType = inputs[0]->GetDataType();

    bool isInt8 = inputDType == DataType::QAsymmS8 || inputDType == DataType::QSymmS8;
    if (isInt8)
    {
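        // GELU(x) = 0.5 * x * (1 + erf(x / sqrt(2))). The lambda below uses the
        // equivalent form 0.5 * x * erfc(-x / sqrt(2)); the magic constant is 1/sqrt(2).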
        auto gelu_transform = [](float in) -> float {
            return 0.5f * in * std::erfc(in * static_cast<float>(-0.70710678118654752440));
        };

        TosaTableAttribute attribute(
            getTosaConst8bitTable(input_scale, input_zp, output_scale, output_zp, gelu_transform));
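        // getTosaConst8bitTable evaluates gelu_transform at each of the 256
        // representable int8 input values (dequantised with input_scale/input_zp,
        // requantised with output_scale/output_zp), producing the lookup table
        // that the TOSA TABLE operator applies element-wise.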
        operators.push_back(new TosaSerializationOperator(tosa::Op_TABLE,
                                                          Attribute_TableAttribute,
                                                          &attribute,
                                                          {inputName},
                                                          {outputName}));
    }
    else if (inputDType == DataType::QSymmS16 ||
             inputDType == DataType::Signed32 ||
             inputDType == DataType::Signed64)
    {
        throw Exception("ConvertGeluToTosaOperator() only supports int8 quantized types.");
    }
    else
    {
        throw Exception("ConvertGeluToTosaOperator() floating point types currently unimplemented.");
    }

    // Only add input tensors if the connected layer is an input layer,
    // as intermediate or constant tensors will be created separately.
    // There also can't be duplicate tensors.
    std::vector<int32_t> inputShape0;
    DType inputDType0 = DType::DType_UNKNOWN;
    if (inputName.find("input_") != std::string::npos)
    {
        inputShape0 = GetTosaTensorShape(inputs[0]->GetShape());
        inputDType0 = ArmNNToDType(inputs[0]->GetDataType());
        tensors.push_back(new TosaSerializationTensor(inputName, inputShape0, inputDType0, {}));
    }

    std::vector<int32_t> outputShape0 = GetTosaTensorShape(outputs[0]->GetShape());
    DType outputDType0 = ArmNNToDType(outputs[0]->GetDataType());
    tensors.push_back(new TosaSerializationTensor(outputName, outputShape0, outputDType0, {}));

    // operatorInputNames/operatorOutputNames end up being the same as
    // blockInputNames/blockOutputNames for one-to-one ArmNN to TOSA mappings.
    return new TosaSerializationBasicBlock(blockName,     // name
                                           mainName,      // region name
                                           operators,     // operators
                                           tensors,       // tensors
                                           {inputName},   // inputs
                                           {outputName}); // outputs
}
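
A minimal sketch of the validation path (layer == nullptr), in which the placeholder "input_"/"output0_" names are used and graph connectivity is ignored. The shape, scale, and zero point below are illustrative values, not taken from ArmNN, and it is assumed that GeluOperator.hpp pulls in the ArmNN and TOSA serialization headers it depends on.

#include "GeluOperator.hpp"

int main()
{
    // Hypothetical QAsymmS8 tensor: shape {1, 8}, scale 0.05, zero point 0.
    armnn::TensorInfo info({ 1, 8 }, armnn::DataType::QAsymmS8, 0.05f, 0);

    armnn::ActivationDescriptor desc;
    desc.m_Function = armnn::ActivationFunction::Gelu;

    // With layer == nullptr the block is built for validation only.
    TosaSerializationBasicBlock* block =
        ConvertGeluToTosaOperator(nullptr, { &info }, { &info }, &desc);

    delete block;
    return 0;
}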