ArmNN 25.11
RsqrtOperator.cpp
//
// Copyright © 2023-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "RsqrtOperator.hpp"
#include "TosaTableUtils.hpp"

TosaSerializationBasicBlock* ConvertRsqrtOperator(const Layer* layer,
                                                  const std::vector<const TensorInfo*>& inputs,
                                                  const std::vector<const TensorInfo*>& outputs,
                                                  const ElementwiseUnaryDescriptor* unaryDescriptor)
{
    if (unaryDescriptor->m_Operation != UnaryOperation::Rsqrt)
    {
        throw armnn::Exception("ConvertRsqrtOperator: Unsupported elementwise unary operation in descriptor.");
    }

    ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(inputs.size() == 1,
                                        "ConvertRsqrtOperator: Rsqrt must have only one input");

    ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(outputs.size() == 1,
                                        "ConvertRsqrtOperator: Rsqrt must have only one output");

    std::string inputName = std::string("input_");
    std::string outputName = std::string("output0_");
    std::string blockName = std::string("Op_RSQRT_block_") + GetUniqueTosaMappingID();
    std::string supportedTypes = std::string(" Supported Types: FLOAT32, FLOAT16 & INT8.");

    // If a layer is present then the block will be used for execution, so the input and output names need to be
    // determined from the previous and following layers to keep the graph connected correctly.
    // For validation this doesn't matter.
    if (layer != nullptr)
    {
        inputName = GenerateUniqueInputName(layer->GetInputSlot(0));
        outputName = GenerateUniqueOutputName(*layer);
    }

    std::vector<TosaSerializationTensor*> tensors;
    std::vector<TosaSerializationOperator*> operators;

    DataType inputDType = inputs[0]->GetDataType();

    if (inputDType == DataType::QAsymmS8 || inputDType == DataType::QSymmS8)
    {
        float input_scale = inputs[0]->GetQuantizationScale();
        float output_scale = outputs[0]->GetQuantizationScale();
        int32_t input_zp = inputs[0]->GetQuantizationOffset();
        int32_t output_zp = outputs[0]->GetQuantizationOffset();

        const float output_max = static_cast<float>(127 - output_zp) * output_scale;

        auto rsqrt_func = [&](float x) -> float
        {
            if (x <= 0.0f)
            {
                return output_max;
            }

            return 1.0f / std::sqrt(x);
        };

        TosaTableAttribute attribute(
            getTosaConst8bitTable(input_scale, input_zp, output_scale, output_zp, rsqrt_func));

        operators.push_back(new TosaSerializationOperator(tosa::Op_TABLE,
                                                          Attribute_TableAttribute,
                                                          &attribute,
                                                          {inputName},
                                                          {outputName}));
    }
    else if (inputDType == DataType::Float32 || inputDType == DataType::Float16)
    {
        operators.push_back(new TosaSerializationOperator(tosa::Op_RSQRT,
                                                          Attribute_NONE,
                                                          nullptr,
                                                          {inputName},
                                                          {outputName}));
    }
    else if (inputDType == DataType::QSymmS16)
    {
        throw Exception("ConvertRsqrtOperator(): unsupported datatype INT16 is not implemented yet." + supportedTypes);
    }
    else if (inputDType == DataType::Signed32 || inputDType == DataType::Signed64)
    {
        throw Exception("ConvertRsqrtOperator(): unsupported datatype INT32 or INT64." + supportedTypes);
    }
    else
    {
        throw Exception("ConvertRsqrtOperator(): TOSA specification does not support this datatype." + supportedTypes);
    }

    // Only add the input tensor if the connected layer is an input layer, as intermediate and constant
    // tensors are created separately. There also can't be duplicate tensors.
    if (inputName.find("input_") != std::string::npos)
    {
        std::vector<int32_t> inputShape0 = GetTosaTensorShape(inputs[0]->GetShape());
        DType inputDType0 = ArmNNToDType(inputDType);
        tensors.push_back(new TosaSerializationTensor(inputName, inputShape0, inputDType0, {}));
    }

    std::vector<int32_t> outputShape0 = GetTosaTensorShape(outputs[0]->GetShape());
    DType outputDType0 = ArmNNToDType(outputs[0]->GetDataType());

    tensors.push_back(new TosaSerializationTensor(outputName, outputShape0, outputDType0, {}));

    // operatorInputNames/operatorOutputNames end up being the same as
    // blockInputNames/blockOutputNames for one-to-one ArmNN to TOSA mappings.
    return new TosaSerializationBasicBlock(blockName,     // name
                                           mainName,      // region name
                                           operators,     // operators
                                           tensors,       // tensors
                                           {inputName},   // inputs
                                           {outputName}); // outputs
}
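In the 8-bit quantized path above, RSQRT is lowered to a TOSA TABLE operator: getTosaConst8bitTable evaluates rsqrt_func over every representable quantized input and bakes the results into a constant lookup table, with non-positive inputs mapped to output_max, the largest value the quantized output can represent (it requantizes back to 127). As a rough illustration only, here is a minimal sketch of such a table generator under the usual dequantize/apply/requantize pattern; the actual helper is declared in TosaTableUtils.hpp and may differ in rounding, clamping, and table width. MakeConst8bitTableSketch and its details are hypothetical, not taken from the ArmNN sources.

// Minimal sketch of an 8-bit lookup-table generator for a unary op such as rsqrt.
// Assumption: getTosaConst8bitTable follows this dequantize/apply/requantize shape;
// the real helper in TosaTableUtils.hpp may differ.
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <functional>
#include <vector>

std::vector<int16_t> MakeConst8bitTableSketch(float input_scale, int32_t input_zp,
                                              float output_scale, int32_t output_zp,
                                              const std::function<float(float)>& func)
{
    std::vector<int16_t> table;
    table.reserve(256);
    for (int32_t q = -128; q <= 127; ++q) // one entry per signed 8-bit input value
    {
        const float x = input_scale * static_cast<float>(q - input_zp); // dequantize
        const float y = func(x);                                        // apply the op
        const int32_t requantized =
            static_cast<int32_t>(std::round(y / output_scale)) + output_zp;
        table.push_back(static_cast<int16_t>(std::clamp(requantized, -128, 127)));
    }
    return table;
}

With rsqrt_func plugged in as func, a zero or negative input dequantizes to x <= 0, returns output_max, and lands on the table's upper clamp of 127.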
Referenced declarations:

#define ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(_cond, _str)
TosaSerializationBasicBlock* ConvertRsqrtOperator(const Layer* layer, const std::vector<const TensorInfo*>& inputs, const std::vector<const TensorInfo*>& outputs, const ElementwiseUnaryDescriptor* unaryDescriptor)
std::string GenerateUniqueOutputName(const Layer& layer, uint32_t layerSlot = 0)
const std::string mainName
DType ArmNNToDType(const DataType& type)
std::string GenerateUniqueInputName(const armnn::InputSlot& slot)
std::string GetUniqueTosaMappingID()
std::vector<int32_t> GetTosaTensorShape(const TensorShape& shape)
std::vector<int16_t> getTosaConst8bitTable(float input_scale, int32_t input_zp, float output_scale, int32_t output_zp, std::function<float(float)> func)
armnn::Exception
    Base class for all ArmNN exceptions so that users can filter to just those.
const InputSlot& GetInputSlot(unsigned int index) const override
    Get a const input slot handle by slot index. Definition: Layer.hpp:337
DataType
    Definition: Types.hpp:49
ElementwiseUnaryDescriptor
    An ElementwiseUnaryDescriptor for the ElementwiseUnaryLayer.
UnaryOperation m_Operation
    Specifies the elementwise unary operation to execute.
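Finally, as the comment in the listing explains, ConvertRsqrtOperator can be called with layer == nullptr for validation, in which case the placeholder names "input_" and "output0_" are used. A hypothetical validation-style call is sketched below using the signatures listed above; the include paths, the {1, 8} Float32 shapes, and the assumption that the function sits in the global namespace are illustrative and not taken from this page.

// Hypothetical validation-style call; shapes, includes, and ownership handling are assumptions.
#include "RsqrtOperator.hpp"

#include <armnn/Descriptors.hpp>
#include <armnn/Tensor.hpp>
#include <armnn/Types.hpp>

int main()
{
    armnn::TensorInfo inputInfo({ 1, 8 }, armnn::DataType::Float32);
    armnn::TensorInfo outputInfo({ 1, 8 }, armnn::DataType::Float32);
    armnn::ElementwiseUnaryDescriptor descriptor(armnn::UnaryOperation::Rsqrt);

    // No layer: placeholder tensor names are used, which is enough to build and inspect the block.
    TosaSerializationBasicBlock* block =
        ConvertRsqrtOperator(nullptr, { &inputInfo }, { &outputInfo }, &descriptor);

    delete block; // the function returns a heap-allocated block, so the caller releases it here
    return 0;
}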