TosaSerializationBasicBlock* ConvertDequantizeToTosaOperator(const Layer* layer,
                                                             const std::vector<const TensorInfo*>& inputs,
                                                             const std::vector<const TensorInfo*>& outputs)
{
    if (inputs.size() != 1)
    {
        throw Exception("ConvertDequantizeToTosaOperator: 1 input tensor required.");
    }

    if (outputs.size() != 1)
    {
        throw Exception("ConvertDequantizeToTosaOperator: 1 output tensor required.");
    }

    if (inputs[0]->HasPerAxisQuantization())
    {
        throw Exception("ConvertDequantizeToTosaOperator: Per axis quantization not currently supported.");
    }
    std::string inputName           = std::string("input_");
    std::string outputName          = std::string("output_");
    std::string outputNameZeroPoint = std::string("intermediate0_") + GetUniqueTosaMappingID();
    std::string outputNameScale     = std::string("intermediate1_") + GetUniqueTosaMappingID();
    std::string outputNameCast      = std::string("intermediate2_") + GetUniqueTosaMappingID();
    std::string outputNameSub       = std::string("intermediate3_") + GetUniqueTosaMappingID();
    std::string blockName           = std::string("Op_DEQUANTIZE_block_") + GetUniqueTosaMappingID();

    // If the block is built for a real layer, derive the input and output names from the
    // surrounding layers so the serialized graph stays connected.
    if (layer != nullptr)
    {
        inputName  = GenerateUniqueInputName(layer->GetInputSlot(0));
        outputName = GenerateUniqueOutputName(*layer);
    }
    std::vector<TosaSerializationTensor*>   tensors;
    std::vector<TosaSerializationOperator*> operators;

    DataType inputDType  = inputs[0]->GetDataType();
    DataType outputDType = outputs[0]->GetDataType();

    std::vector<int32_t> inputShape  = GetTosaTensorShape(inputs[0]->GetShape());
    std::vector<int32_t> outputShape = GetTosaTensorShape(outputs[0]->GetShape());
    // Only add the input tensor if the connected layer is an input layer;
    // intermediate and constant tensors are created separately below.
    if (inputName.find("input_") != std::string::npos)
    {
        tensors.push_back(new TosaSerializationTensor(inputName, inputShape, ArmNNToDType(inputDType), {}));
    }
    if (inputDType == DataType::Float16 ||
        inputDType == DataType::Float32)
    {
        // Floating-point input: a single CAST to the output type is enough.
        operators.push_back(new TosaSerializationOperator(tosa::Op_CAST,
                                                          Attribute_NONE,
                                                          nullptr,
                                                          {inputName},
                                                          {outputName}));
    }
    else if (inputDType == DataType::QAsymmS8 ||
             inputDType == DataType::QSymmS16 ||
             inputDType == DataType::QSymmS8)
    {
        // Quantized input: dequantize as output = (input - zeroPoint) * scale,
        // expressed as a CAST -> SUB -> MUL chain.
        float zeroPoint = static_cast<float>(inputs[0]->GetQuantizationOffset());
        float scale     = inputs[0]->GetQuantizationScale();

        TosaSerializationOperator* castOp = new TosaSerializationOperator(Op_CAST,
                                                                          Attribute_NONE,
                                                                          nullptr,
                                                                          {inputName},
                                                                          {outputNameCast});
        operators.push_back(castOp);
        tensors.push_back(new TosaSerializationTensor(outputNameCast, outputShape, ArmNNToDType(outputDType), {}));
        TosaSerializationOperator* zeroPointOp     = nullptr;
        TosaSerializationTensor*   zeroPointTensor = nullptr;
        CreateConstTosaOperator<float>(outputNameZeroPoint,
                                       zeroPoint,
                                       ArmNNToDType(outputDType),
                                       outputShape,
                                       zeroPointOp,
                                       zeroPointTensor);
        operators.push_back(zeroPointOp);
        tensors.push_back(zeroPointTensor);
        TosaSerializationOperator* subOp = new TosaSerializationOperator(Op_SUB,
                                                                         Attribute_NONE,
                                                                         nullptr,
                                                                         {outputNameCast, outputNameZeroPoint},
                                                                         {outputNameSub});
        operators.push_back(subOp);
        tensors.push_back(new TosaSerializationTensor(outputNameSub, outputShape, ArmNNToDType(outputDType), {}));
        TosaSerializationOperator* scaleOp     = nullptr;
        TosaSerializationTensor*   scaleTensor = nullptr;
        CreateConstTosaOperator<float>(outputNameScale,
                                       scale,
                                       ArmNNToDType(outputDType),
                                       outputShape,
                                       scaleOp,
                                       scaleTensor);
        operators.push_back(scaleOp);
        tensors.push_back(scaleTensor);
        int32_t shift = 0;
        TosaMulAttribute mulAttribute(shift);
        TosaSerializationOperator* mulOp = new TosaSerializationOperator(Op_MUL,
                                                                         Attribute_MulAttribute,
                                                                         &mulAttribute,
                                                                         {outputNameSub, outputNameScale},
                                                                         {outputName});
        operators.push_back(mulOp);
    }
    else
    {
        throw armnn::Exception("ConvertDequantizeToTosaOperator: Unsupported datatype."
                               " Only floating-point and signed quantized datatypes are supported.");
    }
    tensors.push_back(new TosaSerializationTensor(outputName, outputShape, ArmNNToDType(outputDType), {}));

    return new TosaSerializationBasicBlock(blockName,      // name
                                           mainName,       // region name
                                           operators,      // operators
                                           tensors,        // tensors
                                           {inputName},    // block inputs
                                           {outputName});  // block outputs
}
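For reference, below is a minimal standalone sketch of the arithmetic the CAST -> SUB -> MUL chain encodes for a quantized input, i.e. output = (input - zeroPoint) * scale. The scale, zero point and input value are made-up illustrative numbers, not values taken from the converter above.

#include <cstdint>
#include <iostream>

int main()
{
    // Hypothetical per-tensor quantization parameters.
    float scale     = 0.05f;
    float zeroPoint = 10.0f;

    int8_t quantized = 42;                            // QAsymmS8 input element
    float  cast      = static_cast<float>(quantized); // CAST
    float  dequant   = (cast - zeroPoint) * scale;    // SUB, then MUL

    std::cout << dequant << std::endl;                // prints 1.6
}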