ArmNN 24.08
TosaRefLayerSupport.cpp
//
// Copyright © 2022-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "TosaRefLayerSupport.hpp"

#include <tosaCommon/TosaMappings.hpp>

#include <armnn/Types.hpp>
#include <armnn/utility/IgnoreUnused.hpp>

#include <graph_status.h>
#include <model_runner.h>

#include <vector>

namespace armnn
{

bool TosaRefLayerSupport::IsLayerSupported(const LayerType& type,
                                           const std::vector<TensorInfo>& infos,
                                           const BaseDescriptor& descriptor,
                                           const Optional<LstmInputParamsInfo>& lstmParamsInfo,
                                           const Optional<QuantizedLstmInputParamsInfo>& quantizedLstmInputParamsInfo,
                                           Optional<std::string&> reasonIfUnsupported) const
{
    IgnoreUnused(lstmParamsInfo);
    IgnoreUnused(quantizedLstmInputParamsInfo);
    IgnoreUnused(reasonIfUnsupported);

    std::vector<const TensorInfo*> inputInfos;
    std::vector<const TensorInfo*> outputInfos;

    switch (type)
    {
        case LayerType::Activation:
            inputInfos.push_back(&infos[0]);
            outputInfos.push_back(&infos[1]);
            break;
        case LayerType::Input:
        case LayerType::Output:
            return true;
        case LayerType::Addition:
        case LayerType::Multiplication:
        case LayerType::Subtraction:
        case LayerType::ElementwiseBinary:
        case LayerType::BatchMatMul:
            // Setup inputs and outputs
            inputInfos.push_back(&infos[0]);
            inputInfos.push_back(&infos[1]);
            outputInfos.push_back(&infos[2]);
            break;
        case LayerType::Concat:
            for (unsigned int i = 0; i < infos.size() - 1; ++i)
            {
                inputInfos.push_back(&infos[i]);
            }
            outputInfos.push_back(&infos.back());
            break;
        case LayerType::Constant:
            outputInfos.push_back(&infos[0]);
            break;
        case LayerType::Convolution2d:
        {
            inputInfos.push_back(&infos[0]);  // input
            outputInfos.push_back(&infos[1]); // output
            inputInfos.push_back(&infos[2]);  // weights

            auto conv2dDesc = PolymorphicDowncast<const Convolution2dDescriptor*>(&descriptor);
            if (conv2dDesc->m_BiasEnabled)
            {
                inputInfos.push_back(&infos[3]); // bias
            }
            break;
        }
        case LayerType::DepthwiseConvolution2d:
        {
            inputInfos.push_back(&infos[0]);  // input
            outputInfos.push_back(&infos[1]); // output
            inputInfos.push_back(&infos[2]);  // weights

            auto conv2dDesc = PolymorphicDowncast<const DepthwiseConvolution2dDescriptor*>(&descriptor);
            if (conv2dDesc->m_BiasEnabled)
            {
                inputInfos.push_back(&infos[3]); // bias
            }
            break;
        }
        case LayerType::FullyConnected:
        {
            inputInfos.push_back(&infos[0]);  // input
            outputInfos.push_back(&infos[1]); // output
            inputInfos.push_back(&infos[2]);  // weights
            auto fullyConnectedDesc = PolymorphicDowncast<const FullyConnectedDescriptor*>(&descriptor);
            if (fullyConnectedDesc->m_BiasEnabled)
            {
                inputInfos.push_back(&infos[3]); // bias
            }
            break;
        }
        case LayerType::ElementwiseUnary:
        case LayerType::Pad:
        case LayerType::Pooling2d:
        case LayerType::Mean:
        case LayerType::Quantize:
        case LayerType::Reduce:
        case LayerType::Reshape:
        case LayerType::Resize:
        case LayerType::Slice:
        case LayerType::Softmax:
        case LayerType::Transpose:
        {
            inputInfos.push_back(&infos[0]);
            outputInfos.push_back(&infos[1]);
            break;
        }
        case LayerType::Splitter:
        {
            inputInfos.push_back(&infos[0]);
            for (unsigned int i = 1; i < infos.size(); ++i)
            {
                outputInfos.push_back(&infos[i]);
            }
            break;
        }
        case LayerType::TransposeConvolution2d:
        {
            inputInfos.push_back(&infos[0]);  // input
            outputInfos.push_back(&infos[1]); // output
            inputInfos.push_back(&infos[2]);  // weights

            auto conv2dDesc = PolymorphicDowncast<const TransposeConvolution2dDescriptor*>(&descriptor);
            if (conv2dDesc->m_BiasEnabled)
            {
                inputInfos.push_back(&infos[3]); // bias
            }
            break;
        }
        default:
            // Default to false for all unsupported layers.
            return false;
    }

    auto mappings = GetTosaMapping(nullptr, type, inputInfos, outputInfos, descriptor);
    if (mappings->GetName() == "")
    {
        // There currently isn't a TOSA mapping for this layer, as the default was returned.
        return false;
    }

    TosaSerializationHandler handler;

    // Add all mappings to the main block.
    auto* block = new TosaSerializationBasicBlock("main",
                                                  "main",
                                                  mappings->GetOperators(),
                                                  mappings->GetTensors(),
                                                  mappings->GetInputs(),
                                                  mappings->GetOutputs());

    std::vector<TosaSerializationBasicBlock*> blocks;
    blocks.emplace_back(block);

    // Add blocks to the main region.
    auto* region = new TosaSerializationRegion("main", blocks);
    handler.GetRegions().emplace_back(region);

    GraphStatus status;
    TosaReference::IModelRunner runner;

#if !defined(TOSA_REFERENCE_MODEL_OUTPUT)
    // There currently isn't a way to disable the output from the TOSA Reference Model, but it does have a file pointer
    // to write debug output to, so set this to /dev/null (if it exists on the system) to hide the output.
    func_debug_t funcDebug;

    FILE* file = fopen("/dev/null", "w");
    funcDebug.func_debug_file = (file == nullptr) ? stderr : file;

    runner.setFuncDebug(funcDebug);
#endif

    // Initialise the model runner with the TosaSerializationHandler, which runs validation on the mapping.
    status = runner.initialize(handler);

#if !defined(TOSA_REFERENCE_MODEL_OUTPUT)
    // Reset FuncDebug, as it can persist across multiple IModelRunner instances.
    funcDebug.func_debug_file = stderr;
    runner.setFuncDebug(funcDebug);
#endif

    if (status == GraphStatus::TOSA_ERROR || status == GraphStatus::TOSA_UNPREDICTABLE)
    {
        return false;
    }
    else
    {
        return true;
    }
}

} // namespace armnn
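
For orientation, the following is a minimal, hypothetical caller-side sketch of the IsLayerSupported entry point defined above, querying whether the TOSA Reference backend accepts a simple ReLU Activation layer. Only the IsLayerSupported signature and the Activation case (one input, one output) come from the listing; the include paths, tensor shape, and data type are illustrative assumptions.

// Hypothetical usage sketch (not part of this file): query the TOSA Reference
// backend for support of a ReLU Activation layer. Include paths and shapes are
// assumptions for illustration.
#include "TosaRefLayerSupport.hpp"  // in-tree backend header (assumed path)

#include <armnn/Descriptors.hpp>
#include <armnn/Optional.hpp>
#include <armnn/Tensor.hpp>
#include <armnn/Types.hpp>

#include <iostream>
#include <string>
#include <vector>

int main()
{
    using namespace armnn;

    // Activation takes one input tensor and produces one output tensor
    // (see the Activation case in the switch above).
    TensorInfo input({ 1, 2, 2, 3 }, DataType::Float32);
    TensorInfo output({ 1, 2, 2, 3 }, DataType::Float32);

    ActivationDescriptor desc;
    desc.m_Function = ActivationFunction::ReLu;

    TosaRefLayerSupport layerSupport;
    std::string reason;

    bool supported = layerSupport.IsLayerSupported(LayerType::Activation,
                                                   { input, output },
                                                   desc,
                                                   EmptyOptional(),   // no LSTM params
                                                   EmptyOptional(),   // no quantized LSTM params
                                                   Optional<std::string&>(reason));

    // Note: this backend ignores reasonIfUnsupported (see the IgnoreUnused calls
    // above), so 'reason' stays empty either way.
    std::cout << "Activation supported by TosaRef: " << (supported ? "yes" : "no") << std::endl;
    return 0;
}

In practice this check is driven through ArmNN's ILayerSupport interface during network optimisation rather than called directly; the direct call above is only meant to make the argument layout concrete.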