ArmNN 24.08 — ReluOperator.cpp (go to the documentation of this file)
1 //
2 // Copyright © 2024 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 //
6 // Copyright © 2020 The TensorFlow Authors. All Rights Reserved.
7 // SPDX-License-Identifier: Apache-2.0
8 //
9 
#include "LeakyReluOperator.hpp"
#include "TosaRescaleOperatorUtils.hpp"

#include <layers/ActivationLayer.hpp>
15 // This function is paraphrased from:
16 // tensorflow/compiler/mlir/tosa/transforms/legalize_tfl.cc from function ConvertTFLReluOp
17 TosaSerializationBasicBlock* ConvertReluToTosaOperator(const Layer* layer,
18  const std::vector<const TensorInfo*>& inputs,
19  const std::vector<const TensorInfo*>& outputs,
20  const ActivationDescriptor* desc)
21 {
22  if (inputs.size() != 1)
23  {
24  throw armnn::Exception("ConvertReluToTosaOperator: 1 input tensors required.");
25  }
26 
27  if (outputs.size() != 1)
28  {
29  throw armnn::Exception("ConvertReluToTosaOperator: 1 output tensor required.");
30  }
31 
32  std::string inputName = std::string("input_");
33  std::string outputName = std::string("output0_");
34  std::string blockName = "";
35 
36  int32_t clamp_min = 0;
37  int32_t clamp_max = 0;
38  float float_max = 0.0f;
39  switch (desc->m_Function)
40  {
41  case ActivationFunction::ReLu:
42  {
43  clamp_max = std::numeric_limits<int32_t>::max();
44  float_max = std::numeric_limits<float>::max();
45  blockName = std::string("Op_RELU_block_") + GetUniqueTosaMappingID();
46  break;
47  }
48  case ActivationFunction::BoundedReLu:
49  {
50  clamp_max = static_cast<int32_t>(desc->m_A);
51  float_max = desc->m_A;
52  blockName = std::string("Op_BOUNDED_RELU_block_") + GetUniqueTosaMappingID();
53  break;
54  }
55  case ActivationFunction::LeakyReLu:
56  {
57  throw Exception("LeakyRelu TOSA mappings are performed in ConvertLeakyReluToTosaOperator().");
58  }
59  default:
60  {
61  throw Exception("Activation function is not supported in ConvertReluToTosaOperator().");
62  }
63  }
64 
65  // If a layer is present then the block will be used for execution, so input and output names need to be determined
66  // using the previous and following layers so the graph is connected correctly. For validation this doesn't matter.
67  if (layer != nullptr)
68  {
69  inputName = GenerateUniqueInputName(layer->GetInputSlot(0));
70  outputName = GenerateUniqueOutputName(*layer);
71  }
72 
73  std::vector<TosaSerializationTensor*> tensors;
74  std::vector<TosaSerializationOperator*> operators;
75 
76  // Only add input tensors if connected layer is an input layer.
77  // As intermediate or constant tensors will be created separately.
78  // There also can't be duplicate tensor.
79  std::vector<int32_t> inputShape0;
80  DType inputDType0 = DType::DType_UNKNOWN;
81  if(inputName.find("input_") != std::string::npos)
82  {
83  inputShape0 = GetTosaTensorShape(inputs[0]->GetShape());
84  inputDType0 = ArmNNToDType(inputs[0]->GetDataType());
85  tensors.push_back(new TosaSerializationTensor(inputName, inputShape0, inputDType0, {}));
86  }
87 
88  std::vector<int32_t> outputShape0 = GetTosaTensorShape(outputs[0]->GetShape());
89  DType outputDType0 = ArmNNToDType(outputs[0]->GetDataType());
90  tensors.push_back(new TosaSerializationTensor(outputName, outputShape0, outputDType0, {}));
91 
92  std::string clampInputNameStr = inputName;
93  if (inputDType0 == tosa::DType::DType_INT8 || inputDType0 == tosa::DType::DType_INT16)
94  {
95  std::string outputNameRescale = std::string("intermediate0_") + GetUniqueTosaMappingID();
96  clampInputNameStr = outputNameRescale;
97 
98  double scale = inputs[0]->GetQuantizationScale() / outputs[0]->GetQuantizationScale();
99  int32_t input_zp = inputs[0]->GetQuantizationOffset();
100  int32_t output_zp = outputs[0]->GetQuantizationOffset();
101 
102  clamp_min = output_zp;
103 
104  if (desc->m_Function == ActivationFunction::BoundedReLu)
105  {
106  clamp_max = static_cast<int32_t>(std::round(desc->m_A / outputs[0]->GetQuantizationScale())) + output_zp;
107  }
108 
109  if (inputDType0 == tosa::DType::DType_INT8)
110  {
111  clamp_min =
112  clamp_min < std::numeric_limits<int8_t>::min() ? std::numeric_limits<int8_t>::min() : clamp_min;
113  clamp_max =
114  clamp_max > std::numeric_limits<int8_t>::max() ? std::numeric_limits<int8_t>::max() : clamp_max;
115  }
116  else
117  {
118  clamp_min =
119  clamp_min < std::numeric_limits<int16_t>::min() ? std::numeric_limits<int16_t>::min() : clamp_min;
120  clamp_max =
121  clamp_max > std::numeric_limits<int16_t>::max() ? std::numeric_limits<int16_t>::max() : clamp_max;
122  }
123 
124  TosaSerializationOperator* rescaleOp = nullptr;
125  CreateRescaleTosaOperator(inputName,
126  outputNameRescale,
127  scale,
128  input_zp,
129  output_zp,
130  false,
131  true,
132  &rescaleOp);
133  operators.push_back(rescaleOp);
134  tensors.push_back(new TosaSerializationTensor(outputNameRescale,
135  inputShape0,
136  inputDType0,
137  {}));
138  }
139 
140  TosaClampAttribute attribute(clamp_min, clamp_max, 0, float_max);
141  auto* clamp_op = new TosaSerializationOperator(Op_CLAMP,
142  Attribute_ClampAttribute,
143  &attribute,
144  {clampInputNameStr},
145  {outputName});
146  operators.push_back(clamp_op);
147 
148  // operatorInputNames/operatorOutputNames ends up being the same as
149  // blockInputNames/blockOutputNames for one-to-one ArmNN to Tosa mappings
150  return new TosaSerializationBasicBlock(blockName, // name
151  mainName, // region name
152  operators, // operators
153  tensors, // tensors
154  {inputName}, // inputs
155  {outputName}); // outputs
156 }
armnn::ActivationDescriptor
An ActivationDescriptor for the ActivationLayer.
Definition: Descriptors.hpp:36
armnn::ActivationDescriptor::m_A
float m_A
Alpha upper bound value used by the activation functions. (BoundedReLu, Linear, TanH,...
Definition: Descriptors.hpp:61
ActivationLayer.hpp
TosaRescaleOperatorUtils.hpp
GenerateUniqueOutputName
std::string GenerateUniqueOutputName(const Layer &layer, uint32_t layerSlot=0)
Definition: TosaOperatorUtils.hpp:120
LeakyReluOperator.hpp
armnn::Layer::GetInputSlot
const InputSlot & GetInputSlot(unsigned int index) const override
Get a const input slot handle by slot index.
Definition: Layer.hpp:337
armnn::Layer
Definition: Layer.hpp:230
mainName
const std::string mainName
Definition: TosaOperatorUtils.hpp:19
ArmNNToDType
DType ArmNNToDType(const DataType &type)
Definition: TosaOperatorUtils.hpp:22
ConvertReluToTosaOperator
TosaSerializationBasicBlock * ConvertReluToTosaOperator(const Layer *layer, const std::vector< const TensorInfo * > &inputs, const std::vector< const TensorInfo * > &outputs, const ActivationDescriptor *desc)
Definition: ReluOperator.cpp:17
armnn::ActivationDescriptor::m_Function
ActivationFunction m_Function
The activation function to use (Sigmoid, TanH, Linear, ReLu, BoundedReLu, SoftReLu,...
Definition: Descriptors.hpp:59
armnn::Exception
Base class for all ArmNN exceptions so that users can filter to just those.
Definition: Exceptions.hpp:46
GetTosaTensorShape
std::vector< int32_t > GetTosaTensorShape(const TensorShape &shape)
Definition: TosaOperatorUtils.hpp:79
CreateRescaleTosaOperator
void CreateRescaleTosaOperator(const std::string &inputName, const std::string &outputName, const std::vector< int32_t > &multipliers, const std::vector< int32_t > &shifts, int32_t input_zp, int32_t output_zp, bool double_round, bool scale32, bool per_channel, TosaSerializationOperator **op)
Definition: TosaRescaleOperatorUtils.hpp:10
GenerateUniqueInputName
std::string GenerateUniqueInputName(const armnn::InputSlot &slot)
Definition: TosaOperatorUtils.hpp:109
GetUniqueTosaMappingID
std::string GetUniqueTosaMappingID()
Definition: TosaOperatorUtils.hpp:138