ArmNN
 24.08
ModelToINetworkTransformer.cpp
Go to the documentation of this file.
1 //
2 // Copyright © 2022, 2024 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
6 #define LOG_TAG "arm-armnn-sl"
7 
#include "ModelToINetworkTransformer.hpp"

#include "CanonicalUtils.hpp"
#include "Converter.hpp"

#include <log/log.h>

#include <cassert>
#include <cstdint>
#include <map>
#include <set>
#include <string>
#include <type_traits>
#include <vector>
14 
15 namespace armnn_driver
16 {
17 
19  const std::vector<armnn::BackendId>& backends,
20  const Model& model,
21  const std::set<unsigned int>& forcedUnsupportedOperations)
22  : m_Data(backends)
23  , m_Model(model)
24  , m_ForcedUnsupportedOperations(forcedUnsupportedOperations)
25  , m_ConversionResult(ConversionResult::Success)
26 {
27  try
28  {
29  Convert();
30  }
31  catch (std::exception& e)
32  {
33  m_ConversionResult = ConversionResult::UnsupportedFeature;
34  VLOG(DRIVER) << "ModelToINetworkTransformer: Unexpected exception: " << e.what() << " Model was: "
35  << GetModelSummary(model);
36  assert(false);
37  }
38 }
39 
40 void ModelToINetworkTransformer::Convert()
41 {
42  VLOG(DRIVER) << "ModelToINetworkTransformer: Convert()";
43  //VLOG(DRIVER) << "ModelToINetworkTransformer: Convert(): " << GetModelSummary(m_Model).c_str();
44 
45  // map the memory pool into shared pointers
46  m_Data.m_MemPools.clear();
47  if (!setRunTimePoolInfosFromCanonicalMemories(&m_Data.m_MemPools, m_Model.pools))
48  {
49  VLOG(DRIVER) << "Setting of run time pool infos from Hidl Memories has failed." << __func__;
50  m_ConversionResult = ConversionResult::ErrorMappingPools;
51  return;
52  }
53 
54  using NetworkOptions = std::vector<armnn::BackendOptions>;
55  NetworkOptions networkOptions;
56  armnn::BackendOptions shapeInferenceMethodOption("ShapeInferenceMethod",
57  {
58  { "InferAndValidate", true }
59  });
60 
61  networkOptions.push_back(shapeInferenceMethodOption);
62 
63  // Create armnn::INetwork
64  m_Data.m_Network = armnn::INetwork::Create(networkOptions);
65 
66  // add operations to it
67  // track which layer outputs each operand
68  VLOG(DRIVER) << "ModelToINetworkTransformer::Convert(): m_OutputSlotForOperand";
69  m_Data.m_OutputSlotForOperand = std::vector<armnn::IOutputSlot*>(m_Model.main.operands.size(), nullptr);
70  try
71  {
72  VLOG(DRIVER) << "ModelToINetworkTransformer::Convert(): for m_Model.inputIndexes.size()";
73  for (uint32_t i = 0; i < m_Model.main.inputIndexes.size(); i++)
74  {
75  VLOG(DRIVER) << "ModelToINetworkTransformer::Convert(): m_Model.inputIndexes[i]";
76  // inputs in android nn are represented by operands
77  uint32_t inputIndex = m_Model.main.inputIndexes[i];
78  VLOG(DRIVER) << "ModelToINetworkTransformer::Convert(): m_Model.operands[inputIndex]";
79  const Operand& operand = m_Model.main.operands[inputIndex];
80  VLOG(DRIVER) << "ModelToINetworkTransformer::Convert(): GetTensorInfoForOperand(operand)";
81 
82  const armnn::TensorInfo& tensor = GetTensorInfoForOperand(operand);
83  const std::string layerName = "Input_" + std::to_string(i);
84  VLOG(DRIVER) << "ModelToINetworkTransformer::Convert(): m_Data.m_Network->AddInputLayer(...)";
85  armnn::IConnectableLayer* layer = m_Data.m_Network->AddInputLayer(i, layerName.c_str());
86 
87  VLOG(DRIVER) << "ModelToINetworkTransformer::Convert(): layer->GetOutputSlot(0)";
88  armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
89  VLOG(DRIVER) << "ModelToINetworkTransformer::Convert(): outputSlot.SetTensorInfo(...)";
90  outputSlot.SetTensorInfo(GetTensorInfoForOperand(operand));
91 
92  VLOG(DRIVER) << "ModelToINetworkTransformer::Convert(): store for later layers";
93  // store for later layers
94  m_Data.m_OutputSlotForOperand[inputIndex] = &outputSlot;
95  }
96  }
98  {
99  VLOG(DRIVER) << __func__ << "Operand type: " << e.m_type << " is not supported in ArmnnDriver";
100  m_ConversionResult = ConversionResult::UnsupportedFeature;
101  }
102  catch (const armnn::InvalidArgumentException& e)
103  {
104  Fail("%s: Failed to convert input operand to TensorShape: %s", __func__, e.what());
105  m_ConversionResult = ConversionResult::UnsupportedFeature;
106  }
107  bool UnsupportedDynamicOperation = false;
108  for (uint32_t operationIdx = 0; operationIdx < m_Model.main.operations.size(); operationIdx++)
109  {
110  const auto& operation = m_Model.main.operations[operationIdx];
111 
112  bool ok = true;
113  if (m_ForcedUnsupportedOperations.find(operationIdx) != m_ForcedUnsupportedOperations.end())
114  {
115  Fail("%s: Operation at index %i has been forced to be unsupported.", __func__, operationIdx);
116  ok = false;
117  }
118 
119  if (ok)
120  {
121  try
122  {
123  ok = Converter::ConvertOperation(operation, m_Model, m_Data);
124  }
126  {
127  VLOG(DRIVER) << __func__ << "Operation type: " << e.m_type << "is not supported in ArmnnDriver";
128  ok = false;
129  }
130  catch (const armnn::InvalidArgumentException& e)
131  {
132  Fail("%s: Failed to convert operation in %s", __func__, e.what());
133  ok = false;
134  }
135  }
136 
137  // Store whether this operation was successfully converted.
138  m_OperationSupported.emplace(operationIdx, ok);
139 
140  // Any single operation failing will fail the entire conversion.
141  // We still need to continue and check the other ones.
142  if (!ok)
143  {
144  if (m_Data.m_DynamicInputsEncountered)
145  {
146  Fail("%s: The unsupported operation at index %i has dynamic inputs.", __func__, operationIdx);
147  UnsupportedDynamicOperation = true;
148  }
149 
150  m_ConversionResult = ConversionResult::UnsupportedFeature;
151  }
152  m_Data.m_DynamicInputsEncountered = false;
153  }
154 
155  // Due to the NNAPI partitioner not supporting partition boundaries of unknown size,
156  // any operations who's outputs connect to an unsupported operation with with dynamic inputs
157  // will cause a failure.
158 
159  // The simplest solution to this problem is to not support any operations in a model containing
160  // an unsupported operation with with dynamic inputs.
161  if (UnsupportedDynamicOperation)
162  {
163  Fail("%s: Unsupported operation with dynamic inputs found. Retroactively setting all operations to unsupported",
164  __func__);
165  for (auto& operation : m_OperationSupported)
166  {
167  operation.second = false;
168  }
169  }
170 
171  try
172  {
173  if (m_ConversionResult == ConversionResult::Success)
174  {
175  for (uint32_t i = 0; i < m_Model.main.outputIndexes.size(); i++)
176  {
177  // outputs in android nn are represented by operands
178  uint32_t outputIndex = m_Model.main.outputIndexes[i];
179  const auto& operand = m_Model.main.operands[outputIndex];
180  const armnn::TensorInfo& tensor = GetTensorInfoForOperand(operand);
181  const std::string layerName = "Output_" + std::to_string(i);
182  armnn::IConnectableLayer* layer = m_Data.m_Network->AddOutputLayer(i, layerName.c_str());
183 
184  assert(m_Data.m_OutputSlotForOperand[outputIndex]);
185  m_Data.m_OutputSlotForOperand[outputIndex]->Connect(layer->GetInputSlot(0));
186  }
187  }
188  }
189  catch (const armnn::InvalidArgumentException& e)
190  {
191  Fail("%s: Failed to convert output operand to TensorShape: %s", __func__, e.what());
192  m_ConversionResult = ConversionResult::UnsupportedFeature;
193  }
194 }
195 
196 bool ModelToINetworkTransformer::IsOperationSupported(uint32_t operationIndex) const
197 {
198  std::map<uint32_t, bool>::const_iterator it = m_OperationSupported.find(operationIndex);
199  assert(it != m_OperationSupported.end());
200  return it->second;
201 }
202 
203 } // armnn_driver
armnn_driver::Converter::ConvertOperation
static bool ConvertOperation(const Operation &operation, const Model &model, ConversionData &data)
Definition: Converter.cpp:22
armnn_driver::ConversionData::m_MemPools
std::vector<::android::nn::RunTimePoolInfo > m_MemPools
Definition: ConversionUtils.hpp:64
armnn_driver::Operand
::android::nn::Operand Operand
Definition: ConversionUtils.hpp:46
armnn::TensorInfo
Definition: Tensor.hpp:152
armnn_driver::ConversionData::m_DynamicInputsEncountered
bool m_DynamicInputsEncountered
Definition: ConversionUtils.hpp:65
armnn_driver::ConversionResult::UnsupportedFeature
@ UnsupportedFeature
armnn::Exception::what
virtual const char * what() const noexcept override
Definition: Exceptions.cpp:32
ModelToINetworkTransformer.hpp
armnn_driver::ConversionResult
ConversionResult
Definition: ConversionUtils.hpp:127
CanonicalUtils.hpp
armnn::NetworkOptions
std::vector< BackendOptions > NetworkOptions
Definition: BackendOptions.hpp:16
armnn::IOutputSlot
An output connection slot for a layer.
Definition: INetwork.hpp:53
armnn_driver
Helper classes.
Definition: ArmnnDevice.cpp:37
armnn::IOutputSlot::SetTensorInfo
virtual void SetTensorInfo(const TensorInfo &tensorInfo)=0
armnn_driver::ModelToINetworkTransformer::ModelToINetworkTransformer
ModelToINetworkTransformer(const std::vector< armnn::BackendId > &backends, const Model &model, const std::set< unsigned int > &forcedUnsupportedOperations)
Definition: ModelToINetworkTransformer.cpp:18
armnn_driver::Model
::android::nn::Model Model
Helper classes.
Definition: ConversionUtils.hpp:45
armnn_driver::UnsupportedOperand
Definition: CanonicalUtils.hpp:27
armnn::InvalidArgumentException
Definition: Exceptions.hpp:80
armnn_driver::ConversionResult::ErrorMappingPools
@ ErrorMappingPools
armnn::Status::Success
@ Success
armnn_driver::ConversionData::m_OutputSlotForOperand
std::vector< armnn::IOutputSlot * > m_OutputSlotForOperand
Definition: ConversionUtils.hpp:63
armnn_driver::GetModelSummary
std::string GetModelSummary(const Model &model)
Definition: CanonicalUtils.cpp:526
armnn_driver::ModelToINetworkTransformer::IsOperationSupported
bool IsOperationSupported(uint32_t operationIndex) const
Definition: ModelToINetworkTransformer.cpp:196
armnn::BackendOptions
Struct for the users to pass backend specific options.
Definition: BackendOptions.hpp:22
armnn_driver::ConversionData::m_Network
armnn::INetworkPtr m_Network
Definition: ConversionUtils.hpp:62
armnn_driver::ConversionResult::Success
@ Success
armnn_driver::GetTensorInfoForOperand
armnn::TensorInfo GetTensorInfoForOperand(const Operand &operand)
Definition: CanonicalUtils.cpp:97
armnn::IConnectableLayer::GetOutputSlot
virtual const IOutputSlot & GetOutputSlot(unsigned int index) const =0
Get the const output slot handle by slot index.
armnn::IConnectableLayer::GetInputSlot
virtual const IInputSlot & GetInputSlot(unsigned int index) const =0
Get a const input slot handle by slot index.
armnn_driver::UnsupportedOperand::m_type
OperandType m_type
Definition: CanonicalUtils.hpp:35
armnn::IConnectableLayer
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
Definition: INetwork.hpp:80
armnn::INetwork::Create
static INetworkPtr Create(const NetworkOptions &networkOptions={})
Definition: Network.cpp:682
Converter.hpp