ArmNN
 25.11
Loading...
Searching...
No Matches
TosaOperatorUtils.hpp
Go to the documentation of this file.
1//
2// Copyright © 2022-2025 Arm Ltd and Contributors. All rights reserved.
3// SPDX-License-Identifier: MIT
4//
5//
6// Copyright © 2020 The TensorFlow Authors. All Rights Reserved.
7// SPDX-License-Identifier: Apache-2.0
8//
9
10#pragma once
11
#include <Layer.hpp>
#include <armnn/Tensor.hpp>
#include <armnn/Types.hpp>

#include "common/include/ProfilingGuid.hpp"

#include <tosa_serialization_handler.h>

#include <algorithm>
#include <cstdint>
#include <cstring>
#include <memory>
#include <string>
#include <vector>
19
20using namespace armnn;
21using namespace tosa;
22
// Name of the main TOSA block; 'inline' gives the header-scope global a single
// definition across all translation units (C++17) instead of one copy per TU.
inline const std::string mainName = "main";
24
25// Function to return Tosa datatype from input ArmNN datatype.
26inline DType ArmNNToDType(const DataType& type)
27{
28 switch (type)
29 {
31 return DType_FP16;
33 return DType_BF16;
35 return DType_FP32;
37 return DType_UINT8;
40 return DType_INT8;
42 return DType_INT16;
44 return DType_INT32;
46 // No signed 64, only DType_INT48.
47 return DType_UNKNOWN;
49 return DType_BOOL;
50 default:
51 return DType_UNKNOWN;
52 }
53}
54
55// Function to return ArmNN datatype from input Tosa datatype.
56inline DataType DtypeToArmNN(const DType type)
57{
58 switch (type)
59 {
60 case DType_FP16:
61 return DataType::Float16;
62 case DType_BF16:
63 return DataType::BFloat16;
64 case DType_FP32:
65 return DataType::Float32;
66 case DType_UINT8:
67 return DataType::QAsymmU8;
68 case DType_INT8:
69 return DataType::QSymmS8;
70 case DType_INT16:
71 return DataType::QSymmS16;
72 case DType_INT32:
73 return DataType::Signed32;
74 case DType_BOOL:
75 return DataType::Boolean;
76 default:
77 throw armnn::Exception("DtypeToArmNN: Unsupported tosa::DType in ArmNN.");
78 return DataType::Boolean;
79 }
80}
81
82// Function to return Tosa tensor shape from input ArmNN tensor shape.
83inline std::vector<int32_t> GetTosaTensorShape(const TensorShape& shape)
84{
85 std::vector<int32_t> returnShape;
86 for (u_int32_t i = 0; i < shape.GetNumDimensions(); i++)
87 {
88 returnShape.push_back(static_cast<int32_t>(shape[i]));
89 }
90 return returnShape;
91}
92
93// Function that generates unique name using the layer type, input slot and layer guid.
94static std::string GenerateUniqueName(const Layer& layer, uint32_t layerSlot)
95{
96 std::string guid = std::to_string(layer.GetGuid());
97 std::string slotAndGuid = std::to_string(layerSlot) + "_" + guid;
98
99 switch (layer.GetType())
100 {
101 case LayerType::Input:
102 return "input_" + guid;
104 return "output" + slotAndGuid;
106 return "constant_" + guid;
107 default:
108 return "intermediate" + slotAndGuid;
109 }
110}
111
112// Function that generates unique name for the parent layer from the child layer input slot.
113inline std::string GenerateUniqueInputName(const armnn::InputSlot& slot)
114{
115 // Get the layers connected to the input slots and determine unique tensor names.
116 Layer& connectedLayer = slot.GetConnectedOutputSlot()->GetOwningLayer();
117 // For layer input, we want to ensure we get the correct output slot of the parent layer.
118 // For example, if parent layer is split, the parent output slot could be 0 or 1 index.
119 uint32_t connectedInputSlotIdx = slot.GetConnectedOutputSlot()->CalculateIndexOnOwner();
120 return GenerateUniqueName(connectedLayer, connectedInputSlotIdx);
121}
122
123// Function to determine if inputs are from different layers.
124inline bool WeightFromDifferentLayer(const Layer& layer)
125{
126 bool multipleParents = false;
127 if (layer.GetNumInputSlots()> 1)
128 {
129 multipleParents = layer.GetInputSlots()[0].GetConnectedOutputSlot()->GetOwningLayerGuid() !=
130 layer.GetInputSlots()[1].GetConnectedOutputSlot()->GetOwningLayerGuid();
131 }
132
133 return multipleParents;
134}
135
136// Function that generates unique output name using the layer type, input slot and layer guid.
137inline std::string GenerateUniqueOutputName(const Layer& layer, uint32_t layerSlot = 0)
138{
139 Layer& connectedLayer = layer.GetOutputSlot().GetConnection(0)->GetOwningLayer();
140
141 // Get the layer connected to the output slot, if output use that layer and id,
142 // otherwise use current layer and id.
143 if(connectedLayer.GetType() == LayerType::Output)
144 {
145 return GenerateUniqueName(connectedLayer, layerSlot);
146 }
147 else
148 {
149 return GenerateUniqueName(layer, layerSlot);
150 }
151}
152
// Monotonic counter backing GetUniqueTosaMappingID (shared across TUs via C++17 inline).
inline int uniqueTosaMappingID = 0;

// Function to return unique int as a string to ensure uniqueness between all input, output and block names.
inline std::string GetUniqueTosaMappingID()
{
    uniqueTosaMappingID += 1;
    return std::to_string(uniqueTosaMappingID);
}
159
160// Function to return Tosa DType as string.
161inline std::string TosaDTypeToString(DType tosaDType)
162{
163 switch (tosaDType)
164 {
165 case DType_UNKNOWN:
166 return "DType_UNKNOWN";
167 case DType_BOOL:
168 return "DType_BOOL";
169 case DType_UINT8:
170 return "DType_UINT8";
171 case DType_INT4:
172 return "DType_INT4";
173 case DType_INT8:
174 return "DType_INT8";
175 case DType_INT16:
176 return "DType_INT16";
177 case DType_INT32:
178 return "DType_INT32";
179 case DType_INT48:
180 return "DType_INT48";
181 case DType_FP32:
182 return "DType_FP32";
183 case DType_UINT16:
184 return "DType_UINT16";
185 case DType_FP16:
186 return "DType_FP16";
187 case DType_BF16:
188 return "DType_BF16";
189 case DType_SHAPE:
190 return "DType_SHAPE";
191 }
192 return "";
193}
194
195// Function to return Tosa Op as string.
196inline std::string TosaOpToString(Op tosaOp)
197{
198 switch (tosaOp)
199 {
200 case Op_ADD:
201 return "Op_ADD";
202 case Op_AVG_POOL2D:
203 return "Op_AVG_POOL2D";
204 case Op_MAX_POOL2D:
205 return "Op_MAX_POOL2D";
206 case Op_PAD:
207 return "Op_PAD";
208 case Op_UNKNOWN:
209 return "Op_UNKNOWN";
210 case Op_ARGMAX:
211 return "Op_ARGMAX";
212 case Op_CONV2D:
213 return "Op_CONV2D";
214 case Op_CONV3D:
215 return "Op_CONV3D";
216 case Op_DEPTHWISE_CONV2D:
217 return "Op_DEPTHWISE_CONV2D";
218 case Op_FULLY_CONNECTED:
219 return "Op_FULLY_CONNECTED";
220 case Op_MATMUL:
221 return "Op_MATMUL";
222 case Op_TRANSPOSE_CONV2D:
223 return "Op_TRANSPOSE_CONV2D";
224 case Op_CLAMP:
225 return "Op_CLAMP";
226 case Op_RESERVED:
227 return "Op_RESERVED";
228 case Op_SIGMOID:
229 return "Op_SIGMOID";
230 case Op_TANH:
231 return "Op_TANH";
232 case Op_ARITHMETIC_RIGHT_SHIFT:
233 return "Op_ARITHMETIC_RIGHT_SHIFT";
234 case Op_BITWISE_AND:
235 return "Op_BITWISE_AND";
236 case Op_BITWISE_OR:
237 return "Op_BITWISE_OR";
238 case Op_BITWISE_XOR:
239 return "Op_BITWISE_XOR";
240 case Op_INTDIV:
241 return "Op_INTDIV";
242 case Op_LOGICAL_AND:
243 return "Op_LOGICAL_AND";
244 case Op_LOGICAL_LEFT_SHIFT:
245 return "Op_LOGICAL_LEFT_SHIFT";
246 case Op_LOGICAL_RIGHT_SHIFT:
247 return "Op_LOGICAL_RIGHT_SHIFT";
248 case Op_LOGICAL_OR:
249 return "Op_LOGICAL_OR";
250 case Op_LOGICAL_XOR:
251 return "Op_LOGICAL_XOR";
252 case Op_MAXIMUM:
253 return "Op_MAXIMUM";
254 case Op_MINIMUM:
255 return "Op_MINIMUM";
256 case Op_MUL:
257 return "Op_MUL";
258 case Op_POW:
259 return "Op_POW";
260 case Op_SUB:
261 return "Op_SUB";
262 case Op_TABLE:
263 return "Op_TABLE";
264 case Op_ABS:
265 return "Op_ABS";
266 case Op_BITWISE_NOT:
267 return "Op_BITWISE_NOT";
268 case Op_CEIL:
269 return "Op_CEIL";
270 case Op_CLZ:
271 return "Op_CLZ";
272 case Op_EXP:
273 return "Op_EXP";
274 case Op_FLOOR:
275 return "Op_FLOOR";
276 case Op_LOG:
277 return "Op_LOG";
278 case Op_LOGICAL_NOT:
279 return "Op_LOGICAL_NOT";
280 case Op_NEGATE:
281 return "Op_NEGATE";
282 case Op_RECIPROCAL:
283 return "Op_RECIPROCAL";
284 case Op_RSQRT:
285 return "Op_RSQRT";
286 case Op_SELECT:
287 return "Op_SELECT";
288 case Op_EQUAL:
289 return "Op_EQUAL";
290 case Op_GREATER:
291 return "Op_GREATER";
292 case Op_GREATER_EQUAL:
293 return "Op_GREATER_EQUAL";
294 case Op_REDUCE_ANY:
295 return "Op_REDUCE_ANY";
296 case Op_REDUCE_ALL:
297 return "Op_REDUCE_ALL";
298 case Op_REDUCE_MAX:
299 return "Op_REDUCE_MAX";
300 case Op_REDUCE_MIN:
301 return "Op_REDUCE_MIN";
302 case Op_REDUCE_PRODUCT:
303 return "Op_REDUCE_PRODUCT";
304 case Op_REDUCE_SUM:
305 return "Op_REDUCE_SUM";
306 case Op_CONCAT:
307 return "Op_CONCAT";
308 case Op_RESHAPE:
309 return "Op_RESHAPE";
310 case Op_REVERSE:
311 return "Op_REVERSE";
312 case Op_SLICE:
313 return "Op_SLICE";
314 case Op_TILE:
315 return "Op_TILE";
316 case Op_TRANSPOSE:
317 return "Op_TRANSPOSE";
318 case Op_GATHER:
319 return "Op_GATHER";
320 case Op_SCATTER:
321 return "Op_SCATTER";
322 case Op_RESIZE:
323 return "Op_RESIZE";
324 case Op_CAST:
325 return "Op_CAST";
326 case Op_RESCALE:
327 return "Op_RESCALE";
328 case Op_CONST:
329 return "Op_CONST";
330 case Op_IDENTITY:
331 return "Op_IDENTITY";
332 case Op_CUSTOM:
333 return "Op_CUSTOM";
334 case Op_COND_IF:
335 return "Op_COND_IF";
336 case Op_WHILE_LOOP:
337 return "Op_WHILE_LOOP";
338 case Op_FFT2D:
339 return "Op_FFT2D";
340 case Op_RFFT2D:
341 return "Op_RFFT2D";
342 case Op_ERF:
343 return "Op_ERF";
344 case Op_DIM: // = Op_MAX
345 return "Op_DIM";
346 }
347 return "";
348}
349
350inline std::vector<uint8_t> ConvertConstantTensorDataToBuffer(const std::shared_ptr<ConstTensorHandle>& tensorHandle)
351{
352 tosa_err_t error = tosa_err_t::TOSA_OK;
353 std::vector<uint8_t> uint8Data;
354 auto tensorInfo = tensorHandle->GetTensorInfo();
355
356 switch (tensorInfo.GetDataType())
357 {
359 {
360 std::vector<float> data(tensorInfo.GetNumElements());
361 memcpy(data.data(), tensorHandle->Map(true), tensorInfo.GetNumBytes());
362
363 error = TosaSerializationHandler::ConvertF32toU8(data, uint8Data);
364 break;
365 }
367 {
368 std::vector<float> data(tensorInfo.GetNumElements());
369 memcpy(data.data(), tensorHandle->Map(true), tensorInfo.GetNumBytes());
370
371 error = TosaSerializationHandler::ConvertF16toU8(data, uint8Data);
372 break;
373 }
376 {
377 std::vector<int8_t> data(tensorInfo.GetNumElements());
378 memcpy(data.data(), tensorHandle->Map(true), tensorInfo.GetNumBytes());
379
380 error = TosaSerializationHandler::ConvertI8toU8(data, uint8Data);
381 break;
382 }
384 {
385 memcpy(uint8Data.data(), tensorHandle->Map(true), tensorInfo.GetNumBytes());
386 break;
387 }
389 {
390 std::vector<int16_t> data(tensorInfo.GetNumElements());
391 memcpy(data.data(), tensorHandle->Map(true), tensorInfo.GetNumBytes());
392
393 error = TosaSerializationHandler::ConvertI16toU8(data, uint8Data);
394 break;
395 }
397 {
398 std::vector<int32_t> data(tensorInfo.GetNumElements());
399 memcpy(data.data(), tensorHandle->Map(true), tensorInfo.GetNumBytes());
400
401 error = TosaSerializationHandler::ConvertI32toU8(data, uint8Data);
402 break;
403 }
404 default:
405 {
406 throw armnn::Exception("SetConstantTensorData: An unsupported data type was encountered.");
407 }
408 }
409
410 if(error != tosa_err_t::TOSA_OK)
411 {
412 throw armnn::Exception("SetConstantTensorData: An error occurred when converting constant data");
413 }
414
415 tensorHandle->Unmap();
416 return uint8Data;
417}
418
419inline std::vector<uint8_t> CreateConstTosaData(const void* value,
420 DType dtype,
421 const std::vector<int32_t>& shape)
422{
423 std::vector<uint8_t> uint8Data;
424 tosa_err_t error = tosa_err_t::TOSA_OK;
425
426 unsigned int numElements = 1;
427 for (auto s : shape)
428 {
429 if (s < 0)
430 {
431 throw armnn::Exception("CreateConstTosaData: negative shape elements unhandled.");
432 }
433 numElements = numElements * static_cast<unsigned int>(s);
434 }
435
436 switch (dtype)
437 {
438 case DType::DType_FP32:
439 {
440 std::vector<float> data(numElements, *static_cast<const float*>(value));
441 error = TosaSerializationHandler::ConvertF32toU8(data, uint8Data);
442 break;
443 }
444 case DType::DType_FP16:
445 {
446 std::vector<float> data(numElements, *static_cast<const float*>(value));
447 error = TosaSerializationHandler::ConvertF16toU8(data, uint8Data);
448 break;
449 }
450 case DType::DType_INT48:
451 {
452 std::vector<int64_t> data(numElements, *static_cast<const int64_t*>(value));
453 error = TosaSerializationHandler::ConvertI48toU8(data, uint8Data);
454 break;
455 }
456 case DType::DType_INT32:
457 {
458 std::vector<int32_t> data(numElements, *static_cast<const int32_t*>(value));
459 error = TosaSerializationHandler::ConvertI32toU8(data, uint8Data);
460 break;
461 }
462 case DType::DType_INT16:
463 {
464 std::vector<int16_t> data(numElements, *static_cast<const int16_t*>(value));
465 error = TosaSerializationHandler::ConvertI16toU8(data, uint8Data);
466 break;
467 }
468 case DType::DType_INT8:
469 {
470 std::vector<int8_t> data(numElements, *static_cast<const int8_t*>(value));
471 error = TosaSerializationHandler::ConvertI8toU8(data, uint8Data);
472 break;
473 }
474 case DType::DType_UINT8:
475 {
476 const int8_t* copy_data = static_cast<const int8_t*>(value);
477 uint8Data.assign(copy_data, copy_data + numElements);
478 break;
479 }
480 case DType::DType_INT4:
481 {
482 std::vector<int8_t> data(numElements, *static_cast<const int8_t*>(value));
483 error = TosaSerializationHandler::ConvertI4toU8(data, uint8Data);
484 break;
485 }
486 case DType::DType_BOOL:
487 {
488 std::vector<bool> data(numElements, *static_cast<const bool*>(value));
489 error = TosaSerializationHandler::ConvertBooltoU8(data, uint8Data);
490 break;
491 }
492 default:
493 {
494 throw armnn::Exception("CreateConstTosaData: An unsupported data type was encountered.");
495 }
496 }
497
498 if(error != tosa_err_t::TOSA_OK)
499 {
500 throw armnn::Exception("CreateConstTosaData: An error occurred when converting constant data");
501 }
502
503 return uint8Data;
504}
505
506template<typename T>
507inline void CreateConstTosaOperator(const std::string& outputName,
508 const T value,
509 DType dtype,
510 const std::vector<int32_t>& shape,
511 TosaSerializationOperator*& op,
512 TosaSerializationTensor*& tensor)
513{
514 if (outputName.find("constant") == std::string::npos)
515 {
516 throw armnn::Exception(std::string("CreateConstTosaOperator: outputName must contain the string 'constant'"));
517 }
518
519 std::vector<uint8_t> uint8Data = CreateConstTosaData(static_cast<const void *>(&value), dtype, shape);
520
521 op = new TosaSerializationOperator(Op_CONST, Attribute_NONE, nullptr, {}, {outputName});
522 ARMNN_THROW_MSG_IF_FALSE(op, armnn::Exception, "CreateConstTosaOperator: failed to created operator");
523
524 tensor = new TosaSerializationTensor(outputName, shape, dtype, uint8Data);
525 ARMNN_THROW_MSG_IF_FALSE(tensor, armnn::Exception, "CreateConstTosaOperator: failed to created tensor");
526}
527
528inline bool IsUnsignedDataType(DType type)
529{
530 bool type_unsigned = false;
531 switch(type)
532 {
533 case DType_UINT8:
534 case DType_UINT16:
535 type_unsigned = true;
536 break;
537 default:
538 type_unsigned = false;
539 break;
540 }
541 return type_unsigned;
542}
543
544inline void FlipSignage(DType& type)
545{
546 switch(type)
547 {
548 case DType_UINT8:
549 type = DType_INT8;
550 break;
551 case DType_UINT16:
552 type = DType_INT16;
553 break;
554 case DType_INT8:
555 type = DType_UINT8;
556 break;
557 case DType_INT16:
558 type = DType_UINT16;
559 break;
560 default:
561 throw armnn::Exception("Unknown type to change signage");
562 }
563}
564
565// This function is paraphrased from:
566// tensorflow/core/util/tensor_format.h from function GetTensorSpatialDimIndex
567inline int GetTensorSpatialDimIndex(DataLayout format, int spatialDim)
568{
569 switch (format)
570 {
571 case DataLayout::NHWC:
572 return spatialDim + 1;
573 case DataLayout::NCHW:
575 return spatialDim + 2;
577 return spatialDim + 3;
578 default:
579 throw Exception("GetTensorSpatialDimIndex Unknown format");
580 }
581}
582
583// This function is paraphrased from:
584// tensorflow/core/util/tensor_format.h from function GetTensorSpatialDims
585inline int GetTensorSpatialDims(int numDims, DataLayout format)
586{
587 switch (format)
588 {
589 case DataLayout::NHWC:
590 case DataLayout::NCHW:
591 return numDims - 2; // Exclude N,C.
594 return numDims - 3; // Exclude N,C,D.
595 default:
596 throw Exception("GetTensorFeatureDimIndex Unknown format");
597 }
598}
599
// This function is paraphrased from:
// tensorflow/compiler/mlir/tosa/transforms/legalize_utils.cc from function getInputSlicedToItsUsedSize
//
// When a strided window op's stride does not evenly divide the usable extent,
// the trailing rows/columns of the input are never read. TOSA requires exact
// divisibility, so this emits an Op_SLICE trimming the input to the used size
// and returns the sliced tensor's name; otherwise it returns inputName
// unchanged. The SLICE is only emitted when all padding is zero.
// The new operator/tensor (raw pointers, ownership taken by the serialization
// handler) are appended to `operators`/`tensors`.
inline std::string GetInputSlicedToItsUsedSize(const std::vector<int32_t>& inputShape,
                                               const std::string& inputName,
                                               const DataLayout layout,
                                               const DType datatype,
                                               const std::vector<int32_t>& kernel,
                                               const std::vector<int32_t>& pad,
                                               const std::vector<int32_t>& stride,
                                               const std::vector<int32_t>& dilations,
                                               std::vector<TosaSerializationTensor*>& tensors,
                                               std::vector<TosaSerializationOperator*>& operators,
                                               const bool isPoolingOp = false)
{
    const int32_t spatialDims = GetTensorSpatialDims(static_cast<int>(inputShape.size()), layout);

    // Per spatial dim: leftover elements a strided window never covers.
    std::vector<int32_t> outputSizeRemainder;
    for (int spatialDim = 0; spatialDim < spatialDims; spatialDim++)
    {
        const size_t spatialDimSize_t = static_cast<size_t>(spatialDim);
        const size_t spatialDimIndexSize_t = static_cast<size_t>(GetTensorSpatialDimIndex(layout, spatialDim));
        // Pooling kernels are given per spatial dim; otherwise the kernel is
        // indexed by tensor position (assumes a layout-shaped kernel vector -
        // TODO confirm against callers).
        const int32_t kernelVal = isPoolingOp ? kernel[spatialDimSize_t] : kernel[spatialDimIndexSize_t];

        const int32_t inSize = inputShape[spatialDimIndexSize_t];
        // pad holds [before, after] pairs per spatial dim.
        const int32_t fullPad = pad[2 * spatialDimSize_t + 0] + pad[2 * spatialDimSize_t + 1];
        // Usable extent after padding minus the dilated kernel footprint.
        const int32_t fullSize = inSize - 1 + fullPad - (kernelVal - 1) * dilations[spatialDimSize_t];
        outputSizeRemainder.push_back(fullSize % stride[spatialDimSize_t]);
    }

    const bool needSlicing = std::any_of(
        outputSizeRemainder.begin(), outputSizeRemainder.end(), [](int64_t v) { return v > 0; });
    const bool zeroPads = std::all_of(pad.begin(), pad.end(), [](int v) { return v == 0; });

    std::string sliceOutputName = inputName;
    if (needSlicing && zeroPads)
    {
        sliceOutputName = std::string("layer_intermediate1_") + GetUniqueTosaMappingID();
        // Slice from the origin, shrinking each spatial dim by its remainder.
        std::vector<int32_t> start(inputShape.size(), 0);
        std::vector<int32_t> size = inputShape;
        for (int spatialDim = 0; spatialDim < spatialDims; spatialDim++)
        {
            const int index = GetTensorSpatialDimIndex(layout, spatialDim);
            size[static_cast<size_t>(index)] -= outputSizeRemainder[static_cast<size_t>(spatialDim)];
        }

        TosaSliceAttribute attribute(start, size);

        operators.push_back(new TosaSerializationOperator(Op_SLICE,
                                                          Attribute_SliceAttribute,
                                                          &attribute,
                                                          {inputName},
                                                          {sliceOutputName}));
        tensors.push_back(new TosaSerializationTensor(sliceOutputName, size, datatype, {}));
    }
    return sliceOutputName;
}
#define ARMNN_THROW_MSG_IF_FALSE(_cond, _except, _str)
int GetTensorSpatialDims(int numDims, DataLayout format)
int uniqueTosaMappingID
std::string GenerateUniqueOutputName(const Layer &layer, uint32_t layerSlot=0)
const std::string mainName
std::vector< uint8_t > ConvertConstantTensorDataToBuffer(const std::shared_ptr< ConstTensorHandle > &tensorHandle)
std::string TosaOpToString(Op tosaOp)
bool IsUnsignedDataType(DType type)
DType ArmNNToDType(const DataType &type)
std::vector< uint8_t > CreateConstTosaData(const void *value, DType dtype, const std::vector< int32_t > &shape)
bool WeightFromDifferentLayer(const Layer &layer)
int GetTensorSpatialDimIndex(DataLayout format, int spatialDim)
std::string GenerateUniqueInputName(const armnn::InputSlot &slot)
std::string TosaDTypeToString(DType tosaDType)
std::string GetInputSlicedToItsUsedSize(const std::vector< int32_t > &inputShape, const std::string &inputName, const DataLayout layout, const DType datatype, const std::vector< int32_t > &kernel, const std::vector< int32_t > &pad, const std::vector< int32_t > &stride, const std::vector< int32_t > &dilations, std::vector< TosaSerializationTensor * > &tensors, std::vector< TosaSerializationOperator * > &operators, const bool isPoolingOp=false)
std::string GetUniqueTosaMappingID()
void FlipSignage(DType &type)
std::vector< int32_t > GetTosaTensorShape(const TensorShape &shape)
void CreateConstTosaOperator(const std::string &outputName, const T value, DType dtype, const std::vector< int32_t > &shape, TosaSerializationOperator *&op, TosaSerializationTensor *&tensor)
DataType DtypeToArmNN(const DType type)
Base class for all ArmNN exceptions so that users can filter to just those.
Layer & GetOwningLayer() const
Definition Layer.hpp:53
const OutputSlot * GetConnectedOutputSlot() const
Definition Layer.hpp:56
const std::vector< InputSlot > & GetInputSlots() const
Definition Layer.hpp:258
const OutputSlot & GetOutputSlot(unsigned int index=0) const override
Get the const output slot handle by slot index.
Definition Layer.hpp:339
LayerGuid GetGuid() const final
Returns the unique id of the layer.
Definition Layer.hpp:343
unsigned int GetNumInputSlots() const override
Returns the number of connectable input slots.
Definition Layer.hpp:334
LayerType GetType() const override
Returns the armnn::LayerType of this layer.
Definition Layer.hpp:286
const InputSlot * GetConnection(unsigned int index) const override
Definition Layer.cpp:83
unsigned int CalculateIndexOnOwner() const override
Definition Layer.cpp:172
Layer & GetOwningLayer() const
Definition Layer.hpp:132
unsigned int GetNumDimensions() const
Function that returns the tensor rank.
Definition Tensor.cpp:174
Copyright (c) 2021 ARM Limited and Contributors.
DataLayout
Definition Types.hpp:63
DataType
Definition Types.hpp:49