ArmNN
 24.08
TosaRefLayerSupport Class Reference

#include <TosaRefLayerSupport.hpp>

Inheritance diagram for TosaRefLayerSupport:
[legend]
Collaboration diagram for TosaRefLayerSupport:
[legend]

Public Member Functions

bool IsLayerSupported (const LayerType &type, const std::vector< TensorInfo > &infos, const BaseDescriptor &descriptor, const Optional< LstmInputParamsInfo > &lstmParamsInfo, const Optional< QuantizedLstmInputParamsInfo > &, Optional< std::string & > reasonIfUnsupported) const override
 Default implementation of the ILayerSupport interface. Backends should implement this as a switch statement for each of their LayerTypes, calling their specific backend implementation of IsXXXLayerSupported. More...
 

Additional Inherited Members

- Protected Member Functions inherited from ILayerSupport
 ILayerSupport ()
 
virtual ~ILayerSupport ()
 

Detailed Description

Definition at line 12 of file TosaRefLayerSupport.hpp.

Member Function Documentation

◆ IsLayerSupported()

bool IsLayerSupported ( const LayerType type,
const std::vector< TensorInfo > &  infos,
const BaseDescriptor descriptor,
const Optional< LstmInputParamsInfo > &  lstmParamsInfo,
const Optional< QuantizedLstmInputParamsInfo > &  quantizedLstmParamsInfo,
Optional< std::string & >  reasonIfUnsupported 
) const
overridevirtual

Default implementation of the ILayerSupport interface. Backends should implement this as a switch statement for each of their LayerTypes, calling their specific backend implementation of IsXXXLayerSupported.

Reimplemented from ILayerSupport.

Definition at line 21 of file TosaRefLayerSupport.cpp.

27 {
28  IgnoreUnused(lstmParamsInfo);
29  IgnoreUnused(quantizedLstmInputParamsInfo);
30  IgnoreUnused(reasonIfUnsupported);
31 
32  std::vector<const TensorInfo*> inputInfos;
33  std::vector<const TensorInfo*> outputInfos;
34 
35  switch (type)
36  {
38  inputInfos.push_back(&infos[0]);
39  outputInfos.push_back(&infos[1]);
40  break;
41  case LayerType::Input:
42  case LayerType::Output:
43  return true;
49  // Setup inputs and outputs
50  inputInfos.push_back(&infos[0]);
51  inputInfos.push_back(&infos[1]);
52  outputInfos.push_back(&infos[2]);
53  break;
54  case LayerType::Concat:
55  for (unsigned int i = 0; i < infos.size() - 1; ++i)
56  {
57  inputInfos.push_back(&infos[i]);
58  }
59  outputInfos.push_back(&infos.back());
60  break;
62  outputInfos.push_back(&infos[0]);
63  break;
65  {
66  inputInfos.push_back(&infos[0]); // input
67  outputInfos.push_back(&infos[1]); // output
68  inputInfos.push_back(&infos[2]); // weights
69 
70  auto conv2dDesc = PolymorphicDowncast<const Convolution2dDescriptor*>(&descriptor);
71  if(conv2dDesc->m_BiasEnabled)
72  {
73  inputInfos.push_back(&infos[3]); // bias
74  }
75  break;
76  }
78  {
79  inputInfos.push_back(&infos[0]); // input
80  outputInfos.push_back(&infos[1]); // output
81  inputInfos.push_back(&infos[2]); // weights
82 
83  auto conv2dDesc = PolymorphicDowncast<const DepthwiseConvolution2dDescriptor*>(&descriptor);
84  if(conv2dDesc->m_BiasEnabled)
85  {
86  inputInfos.push_back(&infos[3]); // bias
87  }
88  break;
89  }
91  {
92  inputInfos.push_back(&infos[0]); // input
93  outputInfos.push_back(&infos[1]); // output
94  inputInfos.push_back(&infos[2]); // weights
95  auto fullyConnectedDesc = PolymorphicDowncast<const FullyConnectedDescriptor*>(&descriptor);
96  if(fullyConnectedDesc->m_BiasEnabled)
97  {
98  inputInfos.push_back(&infos[3]); // bias
99  }
100  break;
101  }
103  case LayerType::Pad:
105  case LayerType::Mean:
106  case LayerType::Quantize:
107  case LayerType::Reduce:
108  case LayerType::Reshape:
109  case LayerType::Resize:
110  case LayerType::Slice:
111  case LayerType::Softmax:
113  {
114  inputInfos.push_back(&infos[0]);
115  outputInfos.push_back(&infos[1]);
116  break;
117  }
118  case LayerType::Splitter:
119  {
120  inputInfos.push_back(&infos[0]);
121  for (unsigned int i = 1; i < infos.size(); ++i)
122  {
123  outputInfos.push_back(&infos[i]);
124  }
125  break;
126  }
128  {
129  inputInfos.push_back(&infos[0]); // input
130  outputInfos.push_back(&infos[1]); // output
131  inputInfos.push_back(&infos[2]); // weights
132 
133  auto conv2dDesc = PolymorphicDowncast<const TransposeConvolution2dDescriptor*>(&descriptor);
134  if(conv2dDesc->m_BiasEnabled)
135  {
136  inputInfos.push_back(&infos[3]); // bias
137  }
138  break;
139  }
140  default:
141  // Default to false for all unsupported layers.
142  return false;
143  }
144 
145  auto mappings = GetTosaMapping(nullptr, type, inputInfos, outputInfos, descriptor);
146  if (mappings->GetName() == "")
147  {
148  // There currently isn't a TOSA mapping for this layer, as the default was returned.
149  return false;
150  }
151 
152  TosaSerializationHandler handler;
153 
154  // Add all mappings to main block.
155  auto* block = new TosaSerializationBasicBlock("main",
156  "main",
157  mappings->GetOperators(),
158  mappings->GetTensors(),
159  mappings->GetInputs(),
160  mappings->GetOutputs());
161 
162  std::vector<TosaSerializationBasicBlock*> blocks;
163  blocks.emplace_back(block);
164 
165  // Add blocks to the main region.
166  auto* region = new TosaSerializationRegion("main", blocks);
167  handler.GetRegions().emplace_back(region);
168 
169  GraphStatus status;
170  TosaReference::IModelRunner runner;
171 
172 #if !defined(TOSA_REFERENCE_MODEL_OUTPUT)
173  // There currently isn't a way to disable the output from the TOSA Reference Model, but it does have a file pointer
174  // to write debug output to, so set this to /dev/null (if it exists on the system) to hide the output.
175  func_debug_t funcDebug;
176 
177  FILE* file = fopen("/dev/null", "w");
178  funcDebug.func_debug_file = (file == nullptr) ? stderr : file;
179 
180  runner.setFuncDebug(funcDebug);
181 #endif
182 
183  // Initialise the model runner with the TosaSerializationHandler, which runs validation on the mapping.
184  status = runner.initialize(handler);
185 
186 #if !defined(TOSA_REFERENCE_MODEL_OUTPUT)
187  // Reset FuncDebug as they can persist across multiple IModelRunner instances.
188  funcDebug.func_debug_file = stderr;
189  runner.setFuncDebug(funcDebug);
190 #endif
191 
192  if(status == GraphStatus::TOSA_ERROR || status == GraphStatus::TOSA_UNPREDICTABLE)
193  {
194  return false;
195  }
196  else
197  {
198  return true;
199  }
200 }

References armnn::Activation, armnn::Addition, armnn::BatchMatMul, armnn::Concat, armnn::Constant, armnn::Convolution2d, armnn::DepthwiseConvolution2d, armnn::ElementwiseBinary, armnn::ElementwiseUnary, armnn::FullyConnected, GetTosaMapping(), armnn::IgnoreUnused(), armnn::Input, armnn::Mean, armnn::Multiplication, armnn::Output, armnn::Pad, armnn::Pooling2d, armnn::Quantize, armnn::Reduce, armnn::Reshape, armnn::Resize, armnn::Slice, armnn::Softmax, armnn::Splitter, armnn::Subtraction, armnn::Transpose, and armnn::TransposeConvolution2d.


The documentation for this class was generated from the following files:
armnn::LayerType::Splitter
@ Splitter
armnn::LayerType::Transpose
@ Transpose
armnn::LayerType::Reduce
@ Reduce
armnn::LayerType::ElementwiseUnary
@ ElementwiseUnary
armnn::LayerType::ElementwiseBinary
@ ElementwiseBinary
armnn::LayerType::Slice
@ Slice
armnn::LayerType::Subtraction
@ Subtraction
armnn::LayerType::Concat
@ Concat
armnn::LayerType::TransposeConvolution2d
@ TransposeConvolution2d
armnn::LayerType::Softmax
@ Softmax
armnn::LayerType::Quantize
@ Quantize
armnn::LayerType::Multiplication
@ Multiplication
armnn::LayerType::Addition
@ Addition
GetTosaMapping
TosaSerializationBasicBlock * GetTosaMapping(const Layer *layer, const LayerType type, const std::vector< const TensorInfo * > &inputs, const std::vector< const TensorInfo * > &outputs, const BaseDescriptor &descriptor)
Definition: TosaMappings.cpp:18
armnn::LayerType::Pooling2d
@ Pooling2d
armnn::LayerType::FullyConnected
@ FullyConnected
armnn::LayerType::BatchMatMul
@ BatchMatMul
armnn::LayerType::DepthwiseConvolution2d
@ DepthwiseConvolution2d
armnn::LayerType::Reshape
@ Reshape
armnn::IgnoreUnused
void IgnoreUnused(Ts &&...)
Definition: IgnoreUnused.hpp:14
armnn::LayerType::Pad
@ Pad
armnn::LayerType::Mean
@ Mean
armnn::LayerType::Input
@ Input
armnn::LayerType::Resize
@ Resize
armnn::LayerType::Convolution2d
@ Convolution2d
armnn::LayerType::Activation
@ Activation
armnn::LayerType::Output
@ Output
armnn::LayerType::Constant
@ Constant