6 #define LOG_TAG "arm-armnn-sl"
13 #include <type_traits>
// ModelToINetworkTransformer constructor (fragment — the opening line and most
// of the body are not visible in this chunk). Visible pieces: the tail of the
// parameter list (requested backends, operation indices forced unsupported),
// one member initializer, and a catch-all exception handler.
// NOTE(review): the leading numbers on each line are extraction artifacts of
// this view, not code — confirm against the original file before editing.
19 const std::vector<armnn::BackendId>& backends,
21 const std::set<unsigned int>& forcedUnsupportedOperations)
// Stores the caller-supplied set of operation indices that must be reported
// as unsupported; consulted by index lookup during Convert().
24 , m_ForcedUnsupportedOperations(forcedUnsupportedOperations)
// Any std::exception escaping the construction/conversion attempt is caught
// and logged here; presumably the object is left in a failed state rather
// than rethrowing — TODO confirm against the non-visible body.
31 catch (std::exception& e)
34 VLOG(DRIVER) <<
"ModelToINetworkTransformer: Unexpected exception: " << e.what();
// Converts the canonical NNAPI model (m_Model) into an Arm NN network:
//   1. maps the model's memory pools into run-time pool infos,
//   2. enables shape inference-and-validation as a network option,
//   3. adds one input layer per model input and records its output slot,
//   4. walks every operation, recording per-index support in
//      m_OperationSupported (honouring m_ForcedUnsupportedOperations),
//   5. if any unsupported operation has dynamic inputs, retroactively marks
//      ALL operations unsupported,
//   6. adds output layers for the model outputs.
// NOTE(review): this chunk is an extraction fragment — many original lines are
// missing between the visible ones (the leading per-line numbers are artifacts
// of the extraction); comments below are hedged accordingly.
39 void ModelToINetworkTransformer::Convert()
41 VLOG(DRIVER) <<
"ModelToINetworkTransformer: Convert()";
// Map the model's memory pools so operand data can be read during conversion.
// On failure only a log line is visible here — whether Convert() aborts at
// this point is not visible in this fragment; TODO confirm.
46 if (!setRunTimePoolInfosFromCanonicalMemories(&m_Data.
m_MemPools, m_Model.pools))
48 VLOG(DRIVER) <<
"Setting of run time pool infos from Hidl Memories has failed." << __func__;
// Network option: ask Arm NN to infer and validate tensor shapes
// ("InferAndValidate" = true) while layers are added.
57 {
"InferAndValidate",
true }
60 networkOptions.push_back(shapeInferenceMethodOption);
67 VLOG(DRIVER) <<
"ModelToINetworkTransformer::Convert(): m_OutputSlotForOperand";
71 VLOG(DRIVER) <<
"ModelToINetworkTransformer::Convert(): for m_Model.inputIndexes.size()";
// --- Input layers: one AddInputLayer per entry of main.inputIndexes. ---
72 for (uint32_t i = 0; i < m_Model.main.inputIndexes.size(); i++)
74 VLOG(DRIVER) <<
"ModelToINetworkTransformer::Convert(): m_Model.inputIndexes[i]";
// Resolve the operand backing this model input.
76 uint32_t inputIndex = m_Model.main.inputIndexes[i];
77 VLOG(DRIVER) <<
"ModelToINetworkTransformer::Convert(): m_Model.operands[inputIndex]";
78 const Operand& operand = m_Model.main.operands[inputIndex];
79 VLOG(DRIVER) <<
"ModelToINetworkTransformer::Convert(): GetTensorInfoForOperand(operand)";
// Deterministic layer naming by input position ("Input_0", "Input_1", ...).
82 const std::string layerName =
"Input_" + std::to_string(i);
83 VLOG(DRIVER) <<
"ModelToINetworkTransformer::Convert(): m_Data.m_Network->AddInputLayer(...)";
86 VLOG(DRIVER) <<
"ModelToINetworkTransformer::Convert(): layer->GetOutputSlot(0)";
88 VLOG(DRIVER) <<
"ModelToINetworkTransformer::Convert(): outputSlot.SetTensorInfo(...)";
// The input layer's output slot is stored (presumably keyed by operand) so
// later layers can connect to it — map insertion itself is not visible here.
91 VLOG(DRIVER) <<
"ModelToINetworkTransformer::Convert(): store for later layers";
// Catch handler (fragment): an input operand whose type Arm NN cannot
// represent — e.m_type is logged and conversion of this input fails.
98 VLOG(DRIVER) << __func__ <<
"Operand type: " << e.
m_type <<
" is not supported in ArmnnDriver";
103 Fail(
"%s: Failed to convert input operand to TensorShape: %s", __func__, e.
what());
// --- Operation support pass. Tracks whether any operation that turned out
// unsupported also has dynamic inputs (forces global unsupport below). ---
106 bool UnsupportedDynamicOperation =
false;
107 for (uint32_t operationIdx = 0; operationIdx < m_Model.main.operations.size(); operationIdx++)
109 const auto& operation = m_Model.main.operations[operationIdx];
// Operations listed in m_ForcedUnsupportedOperations are reported
// unsupported without attempting conversion.
112 if (m_ForcedUnsupportedOperations.find(operationIdx) != m_ForcedUnsupportedOperations.end())
114 Fail(
"%s: Operation at index %i has been forced to be unsupported.", __func__, operationIdx);
// Catch handler (fragment): operation type not representable in Arm NN.
126 VLOG(DRIVER) << __func__ <<
"Operation type: " << e.
m_type <<
"is not supported in ArmnnDriver";
131 Fail(
"%s: Failed to convert operation in %s", __func__, e.
what());
// Record this operation's support verdict ('ok' computed above, in lines
// not visible in this fragment).
137 m_OperationSupported.emplace(operationIdx, ok);
// An unsupported operation whose inputs are dynamic poisons the whole
// model (see retroactive pass below).
145 Fail(
"%s: The unsupported operation at index %i has dynamic inputs.", __func__, operationIdx);
146 UnsupportedDynamicOperation =
true;
// --- Retroactive pass: a single unsupported-dynamic operation makes the
// entire model unsupported, so every recorded verdict is flipped to false.
160 if (UnsupportedDynamicOperation)
162 Fail(
"%s: Unsupported operation with dynamic inputs found. Retroactively setting all operations to unsupported",
164 for (
auto& operation : m_OperationSupported)
166 operation.second =
false;
// --- Output layers: one per entry of main.outputIndexes, named by position.
174 for (uint32_t i = 0; i < m_Model.main.outputIndexes.size(); i++)
177 uint32_t outputIndex = m_Model.main.outputIndexes[i];
178 const auto& operand = m_Model.main.operands[outputIndex];
180 const std::string layerName =
"Output_" + std::to_string(i);
// Catch handler (fragment): output operand could not be converted.
190 Fail(
"%s: Failed to convert output operand to TensorShape: %s", __func__, e.
what());
197 std::map<uint32_t, bool>::const_iterator it = m_OperationSupported.find(operationIndex);
198 assert(it != m_OperationSupported.end());