// NOTE(review): this span is a Doxygen/HTML extraction of a Pad -> TOSA mapping
// function. The signature line and several interior source lines were dropped by
// the extractor (the embedded numbering jumps 15->17, 21->24, 32->34, 50->52,
// 56->58, 64->66), and each surviving line carries its original Doxygen line
// number fused onto the front. The comments below flag the gaps; restore the
// missing lines from the upstream source before treating this as compilable C++.
13{
14 std::string inputName = std::string("input_");
15 std::string outputName = std::string("output0_");
17
18
19
// Gap: the body of this branch (original lines 22-23) is missing — presumably it
// replaces inputName/outputName with unique per-layer names (see
// GenerateUniqueInputName / GenerateUniqueOutputName in the member listing below)
// and builds blockName — TODO confirm against the upstream source.
20 if(layer != nullptr)
21 {
24 }
25
// Flatten the descriptor's pad list into [before_0, after_0, before_1, after_1, ...]:
// each m_PadList pair contributes its .first then its .second, cast to int32_t.
26 std::vector<int32_t> padding;
27 padding.reserve(padDescriptor->
m_PadList.size());
28 for (
size_t it = 0; it < padDescriptor->
m_PadList.size(); ++it) {
29 padding.push_back(
static_cast<int32_t
>(padDescriptor->
m_PadList[it].first));
30 padding.push_back(
static_cast<int32_t
>(padDescriptor->
m_PadList[it].second));
31 }
32
// Gap: original line 33 is missing — it began the statement that produces
// intPadValue (presumably a SelectiveQuantize of padDescriptor->m_PadValue using
// the input's quantization parameters below — TODO confirm). Only the trailing
// arguments of that call survived the extraction:
34 inputs[0]->GetQuantizationScale(),
35 inputs[0]->GetQuantizationOffset());
36 TosaPadAttribute padAttribute(padding, intPadValue ,padDescriptor->
m_PadValue);
37
// Build the single TOSA PAD operator wired from inputName to outputName.
38 auto* op = new TosaSerializationOperator(Op_PAD,
39 Attribute_PadAttribute,
40 &padAttribute,
41 {inputName},
42 {outputName});
43
44 std::vector<TosaSerializationTensor*> tensors;
45
46
47
48
// Only declare the input tensor when the name still carries the "input_" prefix —
// presumably names without it refer to a tensor already declared by a preceding
// block — TODO confirm against the surrounding mapping code.
49 if(inputName.find("input_") != std::string::npos)
50 {
// Gap: original line 51 (the inputShape0 declaration, presumably via
// GetTosaTensorShape(inputs[0]->GetShape())) is missing — TODO confirm.
52 DType inputDType0 =
ArmNNToDType(inputs[0]->GetDataType());
53
54 tensors.push_back(new TosaSerializationTensor(inputName, inputShape0, inputDType0, {}));
55 }
56
// Gap: original line 57 (the outputShape0 declaration) is missing — TODO confirm.
58 DType outputDType0 =
ArmNNToDType(outputs[0]->GetDataType());
59
60 tensors.push_back(new TosaSerializationTensor(outputName, outputShape0, outputDType0, {}));
61
62
63
// Package the PAD operator and its tensors into one basic block.
// (blockName is not declared in any surviving line — its definition was dropped
// by the extraction, likely inside the if(layer != nullptr) branch above.)
64 return new TosaSerializationBasicBlock(blockName,
66 {op},
67 tensors,
68 {inputName},
69 {outputName});
70}
std::string GenerateUniqueOutputName(const Layer &layer, uint32_t layerSlot=0)
const std::string mainName
DType ArmNNToDType(const DataType &type)
std::string GenerateUniqueInputName(const armnn::InputSlot &slot)
std::string GetUniqueTosaMappingID()
std::vector< int32_t > GetTosaTensorShape(const TensorShape &shape)
const InputSlot & GetInputSlot(unsigned int index) const override
Get a const input slot handle by slot index.
T SelectiveQuantize(float value, float scale, int32_t offset)
float m_PadValue
Optional value to use for padding, defaults to 0.
std::vector< std::pair< unsigned int, unsigned int > > m_PadList
Specifies the padding for each input dimension as a (before, after) pair.