ArmNN 25.11
PadOperator.cpp File Reference
Include dependency graph for PadOperator.cpp (graph omitted).

Functions

TosaSerializationBasicBlock * ConvertPadToTosaOperator (const Layer *layer, const std::vector< const TensorInfo * > &inputs, const std::vector< const TensorInfo * > &outputs, const PadDescriptor *padDescriptor)

Function Documentation

◆ ConvertPadToTosaOperator()

TosaSerializationBasicBlock * ConvertPadToTosaOperator ( const Layer *                              layer,
                                                         const std::vector< const TensorInfo * > &  inputs,
                                                         const std::vector< const TensorInfo * > &  outputs,
                                                         const PadDescriptor *                      padDescriptor )

Definition at line 9 of file PadOperator.cpp.

{
    std::string inputName  = std::string("input_");
    std::string outputName = std::string("output0_");
    std::string blockName  = std::string("Op_PAD_block_") + GetUniqueTosaMappingID();

    // If a layer is present then the block will be used for execution, so the input and output names
    // need to be determined from the previous and following layers so the graph is connected correctly.
    // For validation this doesn't matter.
    if (layer != nullptr)
    {
        inputName  = GenerateUniqueInputName(layer->GetInputSlot(0));
        outputName = GenerateUniqueOutputName(*layer);
    }

    std::vector<int32_t> padding;
    padding.reserve(padDescriptor->m_PadList.size());
    for (size_t it = 0; it < padDescriptor->m_PadList.size(); ++it)
    {
        padding.push_back(static_cast<int32_t>(padDescriptor->m_PadList[it].first));
        padding.push_back(static_cast<int32_t>(padDescriptor->m_PadList[it].second));
    }

    auto intPadValue = armnnUtils::SelectiveQuantize<int32_t>(padDescriptor->m_PadValue,
                                                              inputs[0]->GetQuantizationScale(),
                                                              inputs[0]->GetQuantizationOffset());
    TosaPadAttribute padAttribute(padding, intPadValue, padDescriptor->m_PadValue);

    auto* op = new TosaSerializationOperator(Op_PAD,
                                             Attribute_PadAttribute,
                                             &padAttribute,
                                             {inputName},
                                             {outputName});

    std::vector<TosaSerializationTensor*> tensors;

    // Only add an input tensor if the connected layer is an input layer, as intermediate
    // and constant tensors are created separately and duplicate tensors are not allowed.
    if (inputName.find("input_") != std::string::npos)
    {
        std::vector<int32_t> inputShape0 = GetTosaTensorShape(inputs[0]->GetShape());
        DType inputDType0 = ArmNNToDType(inputs[0]->GetDataType());

        tensors.push_back(new TosaSerializationTensor(inputName, inputShape0, inputDType0, {}));
    }

    std::vector<int32_t> outputShape0 = GetTosaTensorShape(outputs[0]->GetShape());
    DType outputDType0 = ArmNNToDType(outputs[0]->GetDataType());

    tensors.push_back(new TosaSerializationTensor(outputName, outputShape0, outputDType0, {}));

    // operatorInputNames/operatorOutputNames end up being the same as
    // blockInputNames/blockOutputNames for one-to-one ArmNN to TOSA mappings.
    return new TosaSerializationBasicBlock(blockName,     // name
                                           mainName,      // region name
                                           {op},          // operators
                                           tensors,       // tensors
                                           {inputName},   // inputs
                                           {outputName}); // outputs
}
Referenced declarations

std::string GenerateUniqueInputName(const armnn::InputSlot &slot)
std::string GenerateUniqueOutputName(const Layer &layer, uint32_t layerSlot=0)
std::string GetUniqueTosaMappingID()
std::vector< int32_t > GetTosaTensorShape(const TensorShape &shape)
DType ArmNNToDType(const DataType &type)
T SelectiveQuantize(float value, float scale, int32_t offset)
const std::string mainName
const InputSlot & Layer::GetInputSlot(unsigned int index) const override
    Get a const input slot handle by slot index. Definition: Layer.hpp, line 337.
float PadDescriptor::m_PadValue
    Optional value to use for padding; defaults to 0.
std::vector< std::pair< unsigned int, unsigned int > > PadDescriptor::m_PadList
    Specifies the padding for each input dimension.

References ArmNNToDType(), GenerateUniqueInputName(), GenerateUniqueOutputName(), Layer::GetInputSlot(), GetTosaTensorShape(), GetUniqueTosaMappingID(), PadDescriptor::m_PadList, PadDescriptor::m_PadValue, mainName, and armnnUtils::SelectiveQuantize().

Referenced by GetTosaMapping().
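
The sketch below is a minimal, hypothetical driver (not part of the Arm NN sources) that builds a PadDescriptor and TensorInfo objects and calls ConvertPadToTosaOperator in validation mode, i.e. with layer == nullptr. The include paths, the main() entry point and the cleanup of the returned block are assumptions; in Arm NN the actual caller is GetTosaMapping().

// Minimal sketch: drive ConvertPadToTosaOperator in validation mode.
// The include paths below are assumptions based on the tosaCommon backend
// layout; adjust them to the actual build setup.
#include "PadOperator.hpp"           // declares ConvertPadToTosaOperator (assumed local header)
#include <armnn/Descriptors.hpp>     // armnn::PadDescriptor
#include <armnn/Tensor.hpp>          // armnn::TensorInfo, armnn::TensorShape
#include <vector>

int main()
{
    using namespace armnn;

    // Pad a 1x8x8x1 float tensor by one element on each spatial edge.
    // m_PadList holds (before, after) pairs per dimension; the converter
    // flattens them into the TOSA padding vector {0,0, 1,1, 1,1, 0,0}.
    PadDescriptor padDesc;
    padDesc.m_PadList  = { {0, 0}, {1, 1}, {1, 1}, {0, 0} };
    padDesc.m_PadValue = 0.0f;

    TensorInfo inputInfo (TensorShape({1,  8,  8, 1}), DataType::Float32);
    TensorInfo outputInfo(TensorShape({1, 10, 10, 1}), DataType::Float32);

    std::vector<const TensorInfo*> inputs  = { &inputInfo };
    std::vector<const TensorInfo*> outputs = { &outputInfo };

    // layer == nullptr: the block is built for validation only, so the
    // auto-generated "input_"/"output0_" tensor names are sufficient.
    TosaSerializationBasicBlock* block =
        ConvertPadToTosaOperator(nullptr, inputs, outputs, &padDesc);

    // Hand the block to a TOSA serialization handler or release it; ownership
    // conventions are the caller's responsibility in this sketch.
    delete block;
    return 0;
}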