6 #define LOG_TAG "arm-armnn-sl"
13 #include <type_traits>
19 const std::vector<armnn::BackendId>& backends,
21 const std::set<unsigned int>& forcedUnsupportedOperations)
24 , m_ForcedUnsupportedOperations(forcedUnsupportedOperations)
31 catch (std::exception& e)
34 VLOG(DRIVER) <<
"ModelToINetworkTransformer: Unexpected exception: " << e.what() <<
" Model was: "
// NOTE(review): this excerpt is extraction-mangled — every statement still
// carries its original in-file line number ("40", "42", ...) fused into the
// text, long statements are wrapped onto un-numbered continuation lines, and
// the jumps in those numbers show that many intermediate lines (braces,
// try/catch headers, whole statements) are missing from this view. The code
// bytes below are left untouched; only comments were added, at what appear to
// be statement boundaries. Claims are limited to the visible fragments.
//
// Convert(): walks the canonical NNAPI Model (m_Model) and builds the
// corresponding Arm NN network — sets up run-time memory pools, adds one
// input layer per model input, records per-operation support in
// m_OperationSupported, and (per the visible fragments) adds one output
// layer per model output.
40 void ModelToINetworkTransformer::Convert()
42 VLOG(DRIVER) <<
"ModelToINetworkTransformer: Convert()";
// Map the model's memory pools into run-time pool infos; the visible branch
// is the failure path (logs and — presumably, in a missing line — bails out;
// TODO confirm against the full file).
47 if (!setRunTimePoolInfosFromCanonicalMemories(&m_Data.
m_MemPools, m_Model.pools))
49 VLOG(DRIVER) <<
"Setting of run time pool infos from Hidl Memories has failed." << __func__;
// Build a network option named "InferAndValidate" with value true and push it
// into networkOptions — presumably selecting Arm NN's InferAndValidate shape
// inference method (the option's enclosing declaration is in a gap; confirm).
58 {
"InferAndValidate",
true }
61 networkOptions.push_back(shapeInferenceMethodOption);
68 VLOG(DRIVER) <<
"ModelToINetworkTransformer::Convert(): m_OutputSlotForOperand";
// --- Input layers: one per entry in m_Model.main.inputIndexes ------------
72 VLOG(DRIVER) <<
"ModelToINetworkTransformer::Convert(): for m_Model.inputIndexes.size()";
73 for (uint32_t i = 0; i < m_Model.main.inputIndexes.size(); i++)
75 VLOG(DRIVER) <<
"ModelToINetworkTransformer::Convert(): m_Model.inputIndexes[i]";
// Resolve the i-th input index to its operand descriptor.
77 uint32_t inputIndex = m_Model.main.inputIndexes[i];
78 VLOG(DRIVER) <<
"ModelToINetworkTransformer::Convert(): m_Model.operands[inputIndex]";
79 const Operand& operand = m_Model.main.operands[inputIndex];
80 VLOG(DRIVER) <<
"ModelToINetworkTransformer::Convert(): GetTensorInfoForOperand(operand)";
// Input layers are named "Input_<i>"; the AddInputLayer call itself and the
// SetTensorInfo on its output slot are only evidenced by the log lines —
// the actual statements sit in the gaps between them.
83 const std::string layerName =
"Input_" + std::to_string(i);
84 VLOG(DRIVER) <<
"ModelToINetworkTransformer::Convert(): m_Data.m_Network->AddInputLayer(...)";
87 VLOG(DRIVER) <<
"ModelToINetworkTransformer::Convert(): layer->GetOutputSlot(0)";
89 VLOG(DRIVER) <<
"ModelToINetworkTransformer::Convert(): outputSlot.SetTensorInfo(...)";
92 VLOG(DRIVER) <<
"ModelToINetworkTransformer::Convert(): store for later layers";
// Catch handler (header missing): an unsupported operand type — 'e' appears
// to be an UnsupportedOperand-style exception exposing m_type.
99 VLOG(DRIVER) << __func__ <<
"Operand type: " << e.
m_type <<
" is not supported in ArmnnDriver";
// Catch handler (header missing): tensor-shape conversion failure for an
// input operand; Fail(...) presumably records/logs the conversion failure.
104 Fail(
"%s: Failed to convert input operand to TensorShape: %s", __func__, e.
what());
// --- Per-operation support scan ------------------------------------------
// UnsupportedDynamicOperation latches true if any unsupported operation has
// dynamic inputs (see line 146/147 below).
107 bool UnsupportedDynamicOperation =
false;
108 for (uint32_t operationIdx = 0; operationIdx < m_Model.main.operations.size(); operationIdx++)
110 const auto& operation = m_Model.main.operations[operationIdx];
// Operations listed in m_ForcedUnsupportedOperations (set by the
// constructor, per the fragment above this function) are reported as
// unsupported regardless of actual conversion.
113 if (m_ForcedUnsupportedOperations.find(operationIdx) != m_ForcedUnsupportedOperations.end())
115 Fail(
"%s: Operation at index %i has been forced to be unsupported.", __func__, operationIdx);
// Catch handler (header missing): unsupported operation type.
127 VLOG(DRIVER) << __func__ <<
"Operation type: " << e.
m_type <<
"is not supported in ArmnnDriver";
// Catch handler (header missing): generic conversion failure for this
// operation.
132 Fail(
"%s: Failed to convert operation in %s", __func__, e.
what());
// Record whether this operation converted successfully ('ok' is computed in
// a missing line — presumably by the per-operation conversion attempt).
138 m_OperationSupported.emplace(operationIdx, ok);
// An unsupported operation with dynamic inputs poisons the whole model:
// note it here, handled after the loop (line 161 onward).
146 Fail(
"%s: The unsupported operation at index %i has dynamic inputs.", __func__, operationIdx);
147 UnsupportedDynamicOperation =
true;
// If any unsupported dynamic-input operation was found, retroactively mark
// every operation unsupported — partial graphs with dynamic tensors
// evidently cannot be handled.
161 if (UnsupportedDynamicOperation)
163 Fail(
"%s: Unsupported operation with dynamic inputs found. Retroactively setting all operations to unsupported",
165 for (
auto& operation : m_OperationSupported)
167 operation.second =
false;
// --- Output layers: one per entry in m_Model.main.outputIndexes, named
// "Output_<i>" (the AddOutputLayer call itself sits in a gap). ------------
175 for (uint32_t i = 0; i < m_Model.main.outputIndexes.size(); i++)
178 uint32_t outputIndex = m_Model.main.outputIndexes[i];
179 const auto& operand = m_Model.main.operands[outputIndex];
181 const std::string layerName =
"Output_" + std::to_string(i);
// Catch handler (header missing): tensor-shape conversion failure for an
// output operand.
191 Fail(
"%s: Failed to convert output operand to TensorShape: %s", __func__, e.
what());
198 std::map<uint32_t, bool>::const_iterator it = m_OperationSupported.find(operationIndex);
199 assert(it != m_OperationSupported.end());